Example #1
    def prepare(cls, model, device=None, **kwargs):
        """
        Loads the model and creates a :class:`migraphx.program`
        ready to be used as a backend.

        :param model: ModelProto (returned by `onnx.load`),
            string for a filename or bytes for a serialized model
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: see :class:`onnxruntime.SessionOptions`
        :return: :class:`migraphx.program`
        """
        if isinstance(model, MIGraphXBackendRep):
            return model
        elif isinstance(model, migraphx.program):
            return MIGraphXBackendRep(model, cls._input_names)
        elif isinstance(model, (str, bytes)):
            # NOTE: `options` is undefined in this excerpt; a SessionOptions()
            # instance (as in the onnxruntime backend this mirrors) is assumed here.
            options = SessionOptions()
            for k, v in kwargs.items():
                if hasattr(options, k):
                    setattr(options, k, v)
            if device is not None and not cls.supports_device(device):
                raise RuntimeError(
                    "Incompatible device expected '{0}', got '{1}'".format(
                        device, get_device()))
            inf = migraphx.parse_onnx_buffer(model)
            device = cls._device
            cls._input_names = inf.get_parameter_names()
            inf.compile(migraphx.get_target(device.lower()))
            return cls.prepare(inf, device, **kwargs)
        else:
            # type: ModelProto
            check_model(model)
            bin = model.SerializeToString()
            return cls.prepare(bin, device, **kwargs)
Example #2
 def _verify_function_set(self, extracted_model, function_set, func_domain):  # type: ignore
     checker.check_model(extracted_model)
     self.assertEqual(len(extracted_model.functions), len(function_set))
     for function in function_set:
         self.assertIsNotNone(
             next((f for f in extracted_model.functions
             if f.name == function and f.domain == func_domain), None))
Example #3
 def _test_merge_models(self,
                        m1def: str,
                        m2def: str,
                        io_map: List[Tuple[str, str]],
                        check_expectations: Callable[
                            [GraphProto, GraphProto, GraphProto], None],
                        inputs: Optional[List[str]] = None,
                        outputs: Optional[List[str]] = None,
                        prefix1: Optional[str] = None,
                        prefix2: Optional[str] = None) -> None:
     m1, m2 = _load_model(m1def), _load_model(m2def)
     g3 = compose.merge_graphs(
         m1.graph,
         m2.graph,
         io_map=io_map,
         inputs=inputs,
         outputs=outputs,
         prefix1=prefix1,
         prefix2=prefix2,
     )
     checker.check_graph(g3)
     check_expectations(m1.graph, m2.graph, g3)
     m3 = compose.merge_models(
         m1,
         m2,
         io_map=io_map,
         inputs=inputs,
         outputs=outputs,
         prefix1=prefix1,
         prefix2=prefix2,
     )
     checker.check_model(m3)
     check_expectations(m1.graph, m2.graph, m3.graph)
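For context, each `io_map` entry pairs an output name of the first graph with an input name of the second, which is how the merged graph gets wired. A minimal, self-contained sketch of the same call, assuming onnx >= 1.10 (for `compose` and `parser`); graph and tensor names are illustrative:

from onnx import checker, compose, parser

m1 = parser.parse_model('''
    <ir_version: 8, opset_import: ["" : 15]>
    g1 (float[2] A) => (float[2] B00) {
        B00 = Relu(A)
    }
''')
m2 = parser.parse_model('''
    <ir_version: 8, opset_import: ["" : 15]>
    g2 (float[2] B01) => (float[2] C) {
        C = Sigmoid(B01)
    }
''')
# output "B00" of the first model feeds input "B01" of the second
m3 = compose.merge_models(m1, m2, io_map=[("B00", "B01")])
checker.check_model(m3)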
Example #4
def polish_model(model, internals=True, extras=True, checking=True):
    """
    polish_model enhanced for inference
    """

    if checking:
        check_model(model)
    strip_doc_string(model)
    if internals:
        passes = optimizer.get_available_passes()
        passes = list(
            filter(lambda name: not name.startswith('split_'), passes))  #
        logger.debug('builtin optimizations to perform in ONNX:\n\t%s', passes)
        model = optimizer.optimize(model, passes=passes)
    if extras:
        for optimize in (
                optimize_model_skip_op_for_inference,
                optimize_model_strip_initializer,
                optimize_model_cast,
                optimize_model_slice,
        ):
            model = optimize(model)
    model = infer_shapes(model)
    if checking:
        check_model(model)
    return model
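A short usage sketch of the helper above, assuming the legacy `onnx.optimizer` module is available; the "model.onnx" path is illustrative:

import onnx

model = onnx.load("model.onnx")
model = polish_model(model, extras=False)  # check, run built-in optimizer passes, and infer shapes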
Example #5
def check_onnx_model(onnx_model, external_converters, external_opset_imports):
    try:
        checker.check_model(onnx_model)
    except onnx.checker.ValidationError as e:
        if external_converters is None:
            raise e
        else:
            # ONNX version >= 1.5: the default checker skips the schema check when
            # a non-standard domain is set. In ONNX-Chainer, external ops without
            # a domain are also accepted, but a warning is shown.
            # ONNX version < 1.5: the checker does not skip the schema check
            # regardless of whether a domain is set. In ONNX-Chainer, errors are
            # ignored when external ops are set.
            if is_support_non_standard_domain():
                if external_opset_imports:
                    raise e
                else:
                    warnings.warn(
                        'ValidationError is occurred but ignored. '
                        'ONNX-Chainer recommends to set '
                        '`external_opset_imports` when using '
                        '`external_converters` on exporting. Please take care '
                        'about ONNX format check is insufficient. Error '
                        'message:\n{}'.format(str(e)), UserWarning)
            else:
                warnings.warn(
                    'ValidationError is occurred but ignored because '
                    'exporting with `external_converters`. Please take care '
                    'about ONNX format check is insufficient. Error '
                    'message:\n{}'.format(str(e)), UserWarning)
Example #6
def _load_model(m_def: str) -> ModelProto:
    '''
    Parses a model from a string representation, including checking the model for correctness
    '''
    m = parser.parse_model(m_def)
    checker.check_model(m)
    return m
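For reference, `parser.parse_model` consumes the ONNX textual syntax, so a model definition passed to `_load_model` looks roughly like the following sketch (names and shapes are illustrative, assuming onnx >= 1.10 where the parser is available):

from onnx import checker, parser

m_def = '''
    <ir_version: 8, opset_import: ["" : 15]>
    agraph (float[N, 2] X) => (float[N, 2] Y) {
        Y = Relu(X)
    }
'''
m = parser.parse_model(m_def)  # build a ModelProto from the textual form
checker.check_model(m)         # validate it, as _load_model does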
Example #7
def save_model(model):
    # NOTE: `model_name` (used for the output filename) and `proto_val_to_dimension_tuple`
    # are assumed to be defined elsewhere in the original module.

    model.graph.value_info.append(
        make_tensor_value_info(
            model.graph.input[0].name, TensorProto.FLOAT,
            proto_val_to_dimension_tuple(model.graph.input[0])))
    model.graph.value_info.append(
        make_tensor_value_info(
            model.graph.output[0].name, TensorProto.FLOAT,
            proto_val_to_dimension_tuple(model.graph.output[0])))

    for init_vals in model.graph.initializer:
        model.graph.value_info.append(
            make_tensor_value_info(init_vals.name, TensorProto.FLOAT,
                                   tuple(init_vals.dims)))

    # print(model.graph.value_info)
    # print("**************************************************************************")
    inferred_model = onnx.shape_inference.infer_shapes(model)

    print(inferred_model.graph.value_info)

    checker.check_model(model)

    onnx.save(model, 'models/' + model_name + '.onnx')
Example #8
    def prepare(cls, predict_model, device='CPU',
                init_model=None, **kwargs):
        '''
        For Onnx Caffe2Backend, we require that init_graph doesn't initialize the actual input of the predict_graph,

        for example, if "img" is the input blob for the predict_net, we require that in init_graph and in the
        initializer of the predict_graph, "img" is not initialized. We don't have a check for this, since
        there is no way we can know which blob is the input of the predict_graph.
        '''
        super(Caffe2Backend, cls).prepare(predict_model, device, **kwargs)

        if init_model:
            checker.check_model(init_model)

        init_net, predict_net = cls.onnx_graph_to_caffe2_net(predict_model.graph)
        predict_net.device_option.CopyFrom(get_device_option(Device(device)))

        ws = Workspace()
        with ws, core.DeviceScope(predict_net.device_option):
            if init_model:
                _, init_net_from_model = cls.onnx_graph_to_caffe2_net(init_model.graph)
                init_net.op.extend(init_net_from_model.op)
            workspace.RunNetOnce(init_net)
            uninitialized = [x
                             for x in predict_net.external_input
                             if not workspace.HasBlob(x)]

        return Caffe2Rep(predict_net, ws, uninitialized)
Example #9
 def _test_merge_models(
     self,
     m1def,  # type: Text
     m2def,  # type: Text
     io_map,  # type: List[Tuple[Text, Text]]
     check_expectations,  # type: Callable[[GraphProto, GraphProto, GraphProto], None]
     inputs=None,  # type: Optional[List[Text]]
     outputs=None,  # type: Optional[List[Text]]
     prefix1=None,  # type: Optional[Text]
     prefix2=None  # type: Optional[Text]
 ):  # type: (...) -> None
     m1, m2 = _load_model(m1def), _load_model(m2def)
     g3 = compose.merge_graphs(
         m1.graph,
         m2.graph,
         io_map=io_map,
         inputs=inputs,
         outputs=outputs,
         prefix1=prefix1,
         prefix2=prefix2,
     )
     checker.check_graph(g3)
     check_expectations(m1.graph, m2.graph, g3)
     m3 = compose.merge_models(
         m1,
         m2,
         io_map=io_map,
         inputs=inputs,
         outputs=outputs,
         prefix1=prefix1,
         prefix2=prefix2,
     )
     checker.check_model(m3)
     check_expectations(m1.graph, m2.graph, m3.graph)
Example #10
    def prepare(cls, model, device=None, **kwargs):
        """
        Loads the model and creates a :class:`onnxruntime.InferenceSession`
        ready to be used as a backend.

        :param model: ModelProto (returned by `onnx.load`),
            string for a filename or bytes for a serialized model
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: see :class:`onnxruntime.SessionOptions`
        :return: :class:`onnxruntime.InferenceSession`
        """
        if isinstance(model, OnnxRuntimeBackendRep):
            return model
        elif isinstance(model, InferenceSession):
            return OnnxRuntimeBackendRep(model)
        elif isinstance(model, (str, bytes)):
            options = SessionOptions()
            for k, v in kwargs.items():
                if hasattr(options, k):
                    setattr(options, k, v)
            inf = InferenceSession(model, options)
            # backend API is primarily used for ONNX test/validation. As such, we should disable session.run() fallback
            # which may hide test failures.
            inf.disable_fallback()
            if device is not None and not cls.supports_device(device):
                raise RuntimeError("Incompatible device expected '{0}', got '{1}'".format(device, get_device()))
            return cls.prepare(inf, device, **kwargs)
        else:
            # type: ModelProto
            check_model(model)
            bin = model.SerializeToString()
            return cls.prepare(bin, device, **kwargs)
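A minimal usage sketch of this backend entry point, assuming onnxruntime is installed and "model.onnx" is a valid single-input model (path, device, and shapes are illustrative):

import numpy as np
import onnxruntime.backend as backend

rep = backend.prepare("model.onnx", "CPU")
outputs = rep.run([np.zeros((1, 2), dtype=np.float32)])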
Example #11
def check_model():  # type: () -> None
    parser = argparse.ArgumentParser('check-model')
    parser.add_argument('model_pb', type=argparse.FileType('rb'))
    args = parser.parse_args()

    model = load(args.model_pb)
    checker.check_model(model)
Example #12
def check_model() -> None:
    parser = argparse.ArgumentParser('check-model')
    parser.add_argument('model_pb', type=argparse.FileType('rb'))
    args = parser.parse_args()

    model = load(args.model_pb)
    checker.check_model(model)
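The same check without the argparse wrapper, as a minimal sketch (the model path is illustrative):

import onnx
from onnx import checker

model = onnx.load("model.onnx")
checker.check_model(model)  # raises checker.ValidationError if the model is malformed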
Example #13
 def graph_def_to_onnx_model(
     cls,
     graph_def,
     init_func=None,
     constants=None,
     value_info=None,
     graph_name=None,
     verbose=True,
     enforce_no_running=False,
 ):
     opset_id = OperatorSetIdProto()
     opset_id.domain = '' # ONNX default domain
     opset_id.version = cls.target_opset_version
     model = make_model(
         cls.graph_def_to_onnx_graph(
             graph_def,
             init_func,
             constants,
             value_info,
             graph_name,
             verbose,
             enforce_no_running,
         ),
         opset_imports=[opset_id],    # current supported opset version
         producer_name='onnx-dragon', # producer name
     )
     checker.check_model(model)
     return model
Example #14
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     model = make_model(cls.caffe2_net_to_onnx_graph(*args, **kwargs))
     opset_id = OperatorSetIdProto()
     opset_id.domain = ''  # ONNX
     opset_id.version = cls._target_opset_version
     model.opset_import.extend([opset_id])
     checker.check_model(model)
     return model
Example #15
 def _optimized(self, graph, opts):
     orig_model = helper.make_model(graph, producer_name='onnx-test')
     orig_model_str = orig_model.SerializeToString()
     optimized_model_str = onnx.optimizer.optimize(orig_model_str, opts)
     optimized_model = ModelProto()
     optimized_model.ParseFromString(optimized_model_str)
     checker.check_model(optimized_model)
     return optimized_model
Example #16
    def test_check_model(self):  # type: () -> None
        node = helper.make_node("Relu", ["X"], ["Y"], name="test")
        graph = helper.make_graph(
            [node], "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        model = helper.make_model(graph, producer_name='test')

        checker.check_model(model)
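For contrast, a minimal sketch (using the same helper/checker/TensorProto imports as above) of a model the checker rejects: the Relu node reads "Missing", which is neither a graph input nor another node's output, so check_model raises a ValidationError.

bad_node = helper.make_node("Relu", ["Missing"], ["Y"], name="bad")
bad_graph = helper.make_graph(
    [bad_node], "bad",
    [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
    [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
bad_model = helper.make_model(bad_graph, producer_name='test')
try:
    checker.check_model(bad_model)
except checker.ValidationError as exc:
    print("model rejected:", exc)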
Example #17
 def test_exports(self):
     input_shape = (2,1,3,1)
     for test in export_test_cases:
         test_name, onnx_name, mx_op, attrs = test
         input_sym = mx.sym.var('data')
         outsym = mx_op(input_sym, **attrs)
         converted_model = onnx_mxnet.export_model(outsym, {}, [input_shape], np.float32,
                                                   onnx_file_path=outsym.name + ".onnx")
         model = load_model(converted_model)
         checker.check_model(model)
Example #18
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     opset_id = OperatorSetIdProto()
     opset_id.domain = ''  # ONNX default domain
     opset_id.version = cls.target_opset_version
     model = make_model(cls.caffe2_net_to_onnx_graph(*args, **kwargs),
                        opset_imports=[opset_id],  # current supported opset version
                        producer_name='onnx-caffe2',  # producer name
                        )
     checker.check_model(model)
     return model
Example #19
 def caffe2_net_to_onnx_model(cls, *args, **kwargs):
     opset_id = OperatorSetIdProto()
     opset_id.domain = ''  # ONNX default domain
     opset_id.version = cls.target_opset_version
     model = make_model(cls.caffe2_net_to_onnx_graph(*args, **kwargs),
                        opset_imports=[opset_id],  # current supported opset version
                        producer_name='onnx-caffe2',  # producer name
                        )
     checker.check_model(model)
     return model
Example #20
  def do_test_expected(self):
    tf.reset_default_graph()
    tf_op = test_data[1]
    output_name = test_data[2]
    inputs = test_data[3]
    attrs = test_data[4]

    # Now construct input feed dict
    # keyed by input name
    onnx_feed_dict = {}
    # keyed by placeholder op
    tf_feed_dict = {}
    tf_param_list = []
    for idx, input_tensor in enumerate(inputs):
      if type(input_tensor) is np.ndarray:
        placeholder = tf.placeholder(
            input_tensor.dtype, shape=input_tensor.shape, name="in_" + str(idx))
        onnx_feed_dict["in_" + str(idx)] = input_tensor
        tf_feed_dict[placeholder] = input_tensor
        tf_param_list.append(placeholder)
      else:
        tf_param_list.append(input_tensor)
    test_op = tf_op(*tf_param_list, **attrs)
    tf_graph = tf.get_default_graph().as_graph_def(add_shapes=True)
    # Construct onnx graph, run with backend.
    onnx_model = tensorflow_graph_to_onnx_model(
        tf_graph,
        output_name,
        ignore_unimplemented=test_option.get("ignore_unimplemented", False))
    if not test_option.get("ignore_unimplemented", False):
      checker.check_model(onnx_model)
      backend_rep = prepare(onnx_model)
      backend_output = []
      backend_rep_outputs = backend_rep.run(onnx_feed_dict)
      for output in backend_rep.outputs:
        backend_output.append(backend_rep_outputs[output])
      backend_output = np.asarray(backend_output)
      backend_output = np.squeeze(
          backend_output, 0) if backend_output.shape[0] == 1 else backend_output

      with tf.Session() as sess:
        tf_output = sess.run(test_op, tf_feed_dict)

      # make sure backend_output and tf_output are Iterable
      if backend_output.ndim == 0:
        backend_output = backend_output.reshape(1)
      if not isinstance(tf_output, Iterable):
        tf_output = [tf_output]

      # skip comparison if test_option specifies that
      # the test is call only.
      if test_option.get("call_only", False):
        return
      for backend_o, tf_o in zip(backend_output, tf_output):
        np.testing.assert_allclose(backend_o, tf_o, rtol=1e-3, atol=1e-7)
Example #21
    def test_check_serialized_model(self) -> None:
        node = helper.make_node(
            "Relu", ["X"], ["Y"], name="test")
        graph = helper.make_graph(
            [node],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        model = helper.make_model(graph, producer_name='test')

        checker.check_model(model.SerializeToString())
Example #22
 def test_exports(self):
     for test in export_test_cases:
         test_name, onnx_name, mx_op, input_shape, attrs = test
         input_sym = mx.sym.var('data')
         outsym = mx_op(input_sym, **attrs)
         converted_model = onnx_mxnet.export_model(
             outsym, {}, [input_shape],
             np.float32,
             onnx_file_path=outsym.name + ".onnx")
         model = load_model(converted_model)
         checker.check_model(model)
Example #23
    def test_check_model(self):  # type: () -> None
        node = helper.make_node(
            "Relu", ["X"], ["Y"], name="test")
        graph = helper.make_graph(
            [node],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        model = helper.make_model(graph, producer_name='test')

        checker.check_model(model)
Example #24
def onnx_save_model(graph, fname, producer=None):
    checker.check_graph(graph)
    if producer:
        model_proto = helper.make_model(graph, producer_name=producer)  # the ModelProto field is producer_name
    else:
        model_proto = helper.make_model(graph)
    change_input_dim(model_proto)
    checker.check_model(model_proto)
    model_string = model_proto.SerializeToString()

    with open(fname, 'wb') as f:
        f.write(model_string)
Example #25
    def test_model_metadata_props(self):  # type: () -> None
        graph = helper.make_graph([], "my graph", [], [])
        model_def = helper.make_model(graph, doc_string='test')
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)  # helper replaces, so no dupe

        dupe = model_def.metadata_props.add()
        dupe.key = 'Title'
        dupe.value = 'Other'
        self.assertRaises(checker.ValidationError, checker.check_model, model_def)
Example #26
 def test_skip_schema_check_on_non_standard_domain(self):  # type: () -> None
     node = helper.make_node(
         "NonExistOp", ["X"], ["Y"], name="test", domain="test.domain")
     graph = helper.make_graph(
         [node],
         "test",
         [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
         [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
     onnx_id = helper.make_opsetid("test.domain", 1)
     model = helper.make_model(graph, producer_name='test',
                               opset_imports=[onnx_id])
     checker.check_model(model)
Example #27
    def test_check_old_model(self):  # type: () -> None
        node = helper.make_node(
            "Pad", ["X"], ["Y"], paddings=(0, 0, 0, 0))
        graph = helper.make_graph(
            [node],
            "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        onnx_id = helper.make_opsetid("", 1)
        model = helper.make_model(graph, producer_name='test', opset_imports=[onnx_id])

        checker.check_model(model)
Example #28
    def test_check_old_model(self):  # type: () -> None
        node = helper.make_node("Pad", ["X"], ["Y"], paddings=(0, 0, 0, 0))
        graph = helper.make_graph(
            [node], "test",
            [helper.make_tensor_value_info("X", TensorProto.FLOAT, [1, 2])],
            [helper.make_tensor_value_info("Y", TensorProto.FLOAT, [1, 2])])
        onnx_id = helper.make_opsetid("", 1)
        model = helper.make_model(graph,
                                  producer_name='test',
                                  opset_imports=[onnx_id])

        checker.check_model(model)
Example #29
    def test_model_metadata_props(self):  # type: () -> None
        graph = helper.make_graph([], "my graph", [], [])
        model_def = helper.make_model(graph, doc_string='test')
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)
        helper.set_model_props(model_def, {'Title': 'my graph', 'Keywords': 'test;graph'})
        checker.check_model(model_def)  # helper replaces, so no dupe

        dupe = model_def.metadata_props.add()
        dupe.key = 'Title'
        dupe.value = 'Other'
        self.assertRaises(checker.ValidationError, checker.check_model, model_def)
Example #30
 def _converted(
         self,
         graph,  # type: GraphProto
         initial_version,  # type: OperatorSetIdProto
         target_version  # type: int
 ):  # type: (...) -> ModelProto
     orig_model = helper.make_model(graph, producer_name='onnx-test', opset_imports=[initial_version])
     # print(type(orig_model))
     converted_model = onnx.version_converter.convert_version(orig_model,
             target_version)
     checker.check_model(converted_model)
     return converted_model
Example #31
    def convert(self):
        onnx_file_name = "test1.onnx"
        name = "test"
        domain = "test.domain"

        #Inputs
        inputs = list()
        inputs.append(
            helper.make_tensor_value_info('X', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('W1', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('B1', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('W2', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('B2', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('W5', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('B5', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('W6', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('B6', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('W9', TensorProto.FLOAT, [1]))
        #         inputs.append(helper.make_tensor_value_info('B9', TensorProto.FLOAT, [1]))

        #Nodes
        nodes = list()
        n = len(self.nodes)
        if n == 0:
            print("Empty Graph")
        else:
            for id, node in self.nodes.items():
                if not node.status:
                    continue
                if len(node.inputs) == 0:
                    node.inputs.append('X')
                output = node.id
                nodes.append(
                    helper.make_node(node.type,
                                     node.inputs,
                                     node.outputs,
                                     name=name,
                                     domain=domain))

        # Outputs
        outputs = list()
        outputs.append(
            helper.make_tensor_value_info(output, TensorProto.FLOAT, [1]))

        graph = helper.make_graph(nodes, name, inputs, outputs)
        onnx_id = helper.make_opsetid(domain, 1)
        model = helper.make_model(graph,
                                  producer_name=name,
                                  opset_imports=[onnx_id])
        checker.check_model(model)
        print(helper.printable_graph(model.graph))
Example #32
    def prepare(cls, model, device=None, **kwargs):
        """
        Loads the model and creates a :class:`onnxruntime.InferenceSession`
        ready to be used as a backend.

        :param model: ModelProto (returned by `onnx.load`),
            string for a filename or bytes for a serialized model
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: see :class:`onnxruntime.SessionOptions`
        :return: :class:`onnxruntime.InferenceSession`
        """
        if isinstance(model, OnnxRuntimeBackendRep):
            return model
        elif isinstance(model, InferenceSession):
            return OnnxRuntimeBackendRep(model)
        elif isinstance(model, (str, bytes)):
            options = SessionOptions()
            for k, v in kwargs.items():
                if hasattr(options, k):
                    setattr(options, k, v)

            excluded_providers = os.getenv("ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS", default="").split(",")
            providers = [x for x in get_available_providers() if (x not in excluded_providers)]

            inf = InferenceSession(model, sess_options=options, providers=providers)
            # backend API is primarily used for ONNX test/validation. As such, we should disable session.run() fallback
            # which may hide test failures.
            inf.disable_fallback()
            if device is not None and not cls.supports_device(device):
                raise RuntimeError("Incompatible device expected '{0}', got '{1}'".format(device, get_device()))
            return cls.prepare(inf, device, **kwargs)
        else:
            # type: ModelProto
            # check_model serializes the model anyways, so serialize the model once here
            # and reuse it below in the cls.prepare call to avoid an additional serialization
            # only works with onnx >= 1.10.0 hence the version check
            onnx_version = tuple(map(int, (version.version.split(".")[:3])))
            onnx_supports_serialized_model_check = onnx_version >= (1, 10, 0)
            bin_or_model = model.SerializeToString() if onnx_supports_serialized_model_check else model
            check_model(bin_or_model)
            opset_supported, error_message = cls.is_opset_supported(model)
            if not opset_supported:
                raise unittest.SkipTest(error_message)
            # Now bin might be serialized, if it's not we need to serialize it otherwise we'll have
            # an infinite recursive call
            bin = bin_or_model
            if not isinstance(bin, (str, bytes)):
                bin = bin.SerializeToString()
            return cls.prepare(bin, device, **kwargs)
Example #33
 def test_exports(self):
     for test in export_test_cases:
         test_name, onnx_name, mx_op, input_shape, attrs = test
         input_sym = mx.sym.var('data')
         if isinstance(mx_op, type) and issubclass(mx_op, (mx.gluon.HybridBlock, mx.gluon.SymbolBlock)):
             mx_op = mx_op(**attrs)
             mx_op.initialize()
             mx_op(mx.nd.zeros(input_shape))
             params = {p.name: p.data() for p in mx_op.collect_params().values()}
             outsym = mx_op(input_sym)
         else:
             params = {}
             outsym = mx_op(input_sym, **attrs)
         converted_model = onnx_mxnet.export_model(outsym, params, [input_shape], np.float32,
                                                   onnx_file_path=outsym.name + ".onnx")
         model = load_model(converted_model)
         checker.check_model(model)
Example #34
    def keras_model_to_onnx_model(cls,
                                  model,
                                  producer_name="onnx-keras",
                                  model_name="keras-model",
                                  model_version=1):

        # TODO save domain, model_version,doc_string

        opset = make_opsetid("", 6)

        model = make_model(cls.keras_graph_to_onnx_graph(model,
                                                         name=model_name),
                           model_version=model_version,
                           producer_name=producer_name,
                           opset_imports=[opset])
        check_model(model)
        return model
Example #35
def convert_model_to_int32(model_path: str, out_path: str):
    """
    convert_model_to_int32 Converts ONNX model with INT64 params to INT32 params.\n

    Args:\n
        model_path (str): path to original ONNX model.\n
        out_path (str): path to save converted model.
    """
    log.info("ONNX INT64 --> INT32 Converter")
    log.info(f"Loading Model: {model_path}")
    # * load model.
    model = onnx.load_model(model_path)
    ch.check_model(model)
    # * get model opset version.
    opset_version = model.opset_import[0].version
    graph = model.graph
    # * The initializer holds all non-constant weights.
    init = graph.initializer
    # * collect model params in a dictionary.
    params_dict = make_param_dictionary(init)
    log.info("Converting INT64 model params to INT32...")
    # * convert all INT64 params to INT32.
    converted_params = convert_params_to_int32(params_dict)
    log.info("Converting constant INT64 nodes to INT32...")
    new_nodes = convert_constant_nodes_to_int32(graph.node)

    graph_name = f"{graph.name}-int32"
    log.info("Creating new graph...")
    # * create a new graph with converted params and new nodes.
    graph_int32 = h.make_graph(
        new_nodes,
        graph_name,
        graph.input,
        graph.output,
        initializer=converted_params,
    )
    log.info("Creating new int32 model...")
    model_int32 = h.make_model(graph_int32, producer_name="onnx-typecast")
    model_int32.opset_import[0].version = opset_version
    ch.check_model(model_int32)
    log.info(f"Saving converted model as: {out_path}")
    onnx.save_model(model_int32, out_path)
    log.info(f"Done Done London. 🎉")
    return
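A minimal invocation sketch of the converter above (paths are illustrative):

if __name__ == "__main__":
    convert_model_to_int32("model-int64.onnx", out_path="model-int32.onnx")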
Example #36
    def test_error_opset_import_mismatch(self) -> None:
        '''
        Tests that providing models with different operator sets imported produces an error
        '''
        m1, m2 = _load_model(m1_def), _load_model(m2_def)
        m1 = helper.make_model(m1.graph,
                               producer_name='test',
                               opset_imports=[helper.make_opsetid("", 10)])
        m2 = helper.make_model(m2.graph,
                               producer_name='test',
                               opset_imports=[helper.make_opsetid("", 15)])

        io_map = [("B00", "B01"), ("B10", "B11"), ("B20", "B21")]
        self.assertRaises(ValueError, compose.merge_models, m1, m2, io_map)

        # Converting to the same Operator set version, should work
        m1 = version_converter.convert_version(m1, 15)
        m3 = compose.merge_models(m1, m2, io_map=io_map)
        checker.check_model(m3)
Example #37
 def _optimized(self, graph, opts):  # type: (GraphProto, Sequence[Text]) -> ModelProto
     orig_model = helper.make_model(graph, producer_name='onnx-test')
     optimized_model = onnx.optimizer.optimize(orig_model, opts)
     checker.check_model(optimized_model)
     return optimized_model
Example #38
def to_onnx_model(inputs, y, model_name="sonnx"):
    """
    get onnx model from singa computational graph
    Args:
        inputs: a list of input tensors (each is initialized with a name)
        y: a list of tensors, usually the outputs of the graph
    Return:
        the onnx model
    """
    assert len(y) == 1  # assume there is only one output
    y = y[0]  
    node = []
    dependency, _ = autograd.infer_dependency(y.creator)

    input_ids = set(id(x) for x in inputs)
    X = []
    for x in inputs:
        dtype = TensorProto.FLOAT
        if y.dtype == tensor.int32:
            dtype = TensorProto.INT32  # TensorProto has no INT member; INT32 appears intended
        X.append(helper.make_tensor_value_info(x.name, dtype, x.shape))
    Y = [helper.make_tensor_value_info(y.name, TensorProto.FLOAT, y.shape)]
    ready = deque([y.creator])

    while len(ready) > 0:
        op = ready.pop()
        assert not isinstance(op, autograd.Dummy)
        outputs = [op.output_name(idx) for yid, idx in op.y_id2idx.items()]
        inputs = [
            srcop.output_name(srcop.y_id2idx[yid])
            for (srcop, yid, _, _) in op.src
        ]
        opname = op.name 
        optype = str(op).split(".")[-1].split(" ")[0]
        if isinstance(op, autograd.Concat):
            node.append(
                helper.make_node(
                    "Concat",
                    inputs=inputs,
                    outputs=outputs,
                    name=opname,
                    axis=op.axis,
                )
            )
        elif isinstance(op, autograd._Conv2d):
            pads = [
                op.handle.pad_h,
                op.handle.pad_w,
                op.handle.pad_w,
                op.handle.pad_h,
            ]
            stride = [op.handle.stride_h, op.handle.stride_w]
            k = [op.handle.kernel_h, op.handle.kernel_w]
            node.append(
                helper.make_node(
                    "Conv",
                    inputs=inputs,
                    outputs=outputs,
                    name=opname,
                    kernel_shape=k,
                    pads=pads,
                    strides=stride,
                    group=op.handle.group,
                )
            )
        elif isinstance(op, autograd._Pooling2d):
            k = [op.handle.kernel_h, op.handle.kernel_w]
            s = [op.handle.stride_h, op.handle.stride_w]
            p = [
                op.handle.pad_h,
                op.handle.pad_w,
                op.handle.pad_w,
                op.handle.pad_h,
            ]
            if op.handle.is_max_pooling:
                node.append(
                    helper.make_node(
                        "MaxPool",
                        inputs=inputs,
                        outputs=outputs,
                        name=opname,
                        kernel_shape=k,
                        pads=p,
                        strides=s,
                    )
                )
            else:
                node.append(
                    helper.make_node(
                        "AveragePool",
                        inputs=inputs,
                        outputs=outputs,
                        name=opname,
                        kernel_shape=k,
                        pads=p,
                        strides=s,
                    )
                )
        elif isinstance(op, autograd._BatchNorm2d):
            node.append(
                helper.make_node(
                    "BatchNormalization",
                    inputs=inputs,
                    outputs=outputs,
                    name=opname,
                    momentum=op.handle.factor,
                )
            )
            # [(<singa.autograd.Sigmoid object at 0x7fd5ec09cb90>, 140556764852432, None, False),
            # (<singa.autograd.Dummy object at 0x7fd5ec09c390>, 140556764824208,
            # <singa.tensor.Tensor object at 0x7fd5ec09c290>, True),
            # (<singa.autograd.Dummy object at 0x7fd5ec09c490>, 140556764824528,
            # <singa.tensor.Tensor object at 0x7fd5ec09c3d0>, True),
            # (<singa.autograd.Dummy object at 0x7fd5ec09c590>, 140556764824784, None, False),
            # (<singa.autograd.Dummy object at 0x7fd5ec09c690>, 140556764825040, None, False)])
            # two dummy operators do not have values, so take the values from handle
            """
            dummy0 = tensor.to_numpy(
                tensor.Tensor(
                    device=op.running_mean.device(), data=op.running_mean
                )
            )
            dummy1 = tensor.to_numpy(
                tensor.Tensor(
                    device=op.running_var.device(), data=op.running_var
                )
            )
            dummy0 = helper.make_node(
                "Constant",
                inputs=[],
                outputs=[inputs[3]],
                value=numpy_helper.from_array(dummy0),
            )
            dummy1 = helper.make_node(
                "Constant",
                inputs=[],
                outputs=[inputs[4]],
                value=numpy_helper.from_array(dummy1),
            )
            node.append(dummy0)
            node.append(dummy1)
            """
        else:
            singa2onnx = {
                "SoftMax": "Softmax",
                "AddBias": "Add",
                "Add": "Add",
                "Matmul": "MatMul",
                "ReLU": "Relu",
                "ElemMatmul": "Mul",
                "Flatten": "Flatten",
                "Tanh": "Tanh",
                "Sigmoid": "Sigmoid"
            }
            assert optype in singa2onnx, "Unsupported op:{}".format(optype)
            onnx_op = singa2onnx[optype]
            node.append(
                helper.make_node(
                    onnx_op, inputs=inputs, outputs=outputs, name=opname
                )
            )

        for srcop, yid, y, _ in op.src:
            dependency[srcop] -= 1
            if dependency[srcop] == 0:
                if isinstance(srcop, autograd.Dummy):
                    if yid not in input_ids:
                        tmp = helper.make_node(
                            "Constant",
                            inputs=[],
                            outputs=[srcop.output_name(0)],
                            value=helper.make_tensor(
                                name=opname,
                                data_type=TensorProto.FLOAT,
                                dims=y.shape,
                                vals=tensor.to_numpy(y)
                                .flatten()
                                .astype(float),
                            ),
                        )
                        node.append(tmp)
                else:
                    ready.append(srcop)

    # print(node)
    onnx_model = helper.make_model(
        helper.make_graph(node[::-1], model_name, X, Y)
    )
    checker.check_model(onnx_model)
    return onnx_model
Example #39
 def _inferred(self, graph):  # type: (GraphProto) -> ModelProto
     orig_model = helper.make_model(graph, producer_name='onnx-test')
     inferred_model = onnx.shape_inference.infer_shapes(orig_model)
     checker.check_model(inferred_model)
     return inferred_model