Example #1
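Builds optional ValueInfoProtos with onnx.helper, wrapping first a plain tensor type and then a sequence type, and checks that the wrapped elem_type survives the round trip.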
    def test_make_optional_value_info(self):  # type: () -> None
        tensor_type_proto = helper.make_tensor_type_proto(elem_type=2,
                                                          shape=[5])
        tensor_val_info = helper.make_value_info(name='test',
                                                 type_proto=tensor_type_proto)
        optional_type_proto = helper.make_optional_type_proto(
            tensor_type_proto)
        optional_val_info = helper.make_value_info(
            name='test', type_proto=optional_type_proto)

        self.assertEqual(optional_val_info.name, 'test')
        self.assertTrue(optional_val_info.type.optional_type)
        self.assertEqual(optional_val_info.type.optional_type.elem_type,
                         tensor_val_info.type)

        # Test Sequence
        sequence_type_proto = helper.make_sequence_type_proto(
            tensor_type_proto)
        optional_type_proto = helper.make_optional_type_proto(
            sequence_type_proto)
        optional_val_info = helper.make_value_info(
            name='test', type_proto=optional_type_proto)

        self.assertEqual(optional_val_info.name, 'test')
        self.assertTrue(optional_val_info.type.optional_type)
        # The optional wraps a sequence whose elem_type is the original tensor type.
        self.assertEqual(
            optional_val_info.type.optional_type.elem_type.sequence_type.
            elem_type, tensor_val_info.type)
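For reference, the same construction as a self-contained snippet (a minimal sketch assuming only that onnx is installed; TensorProto.UINT8 is the named form of the raw elem_type=2 used above):

from onnx import TensorProto, helper

tensor_type = helper.make_tensor_type_proto(elem_type=TensorProto.UINT8, shape=[5])
optional_type = helper.make_optional_type_proto(tensor_type)
opt_info = helper.make_value_info(name='test', type_proto=optional_type)

# The wrapped element type is recoverable from the optional proto.
assert opt_info.type.optional_type.elem_type == tensor_type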
Example #2
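Verifies that helper.make_tensor_sequence_value_info is shorthand for building the sequence type proto by hand and passing it to make_value_info.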
    def test_make_sequence_value_info(self):  # type: () -> None
        tensor_type_proto = helper.make_tensor_type_proto(elem_type=2,
                                                          shape=None)
        sequence_type_proto = helper.make_sequence_type_proto(
            tensor_type_proto)
        sequence_val_info = helper.make_value_info(
            name='test', type_proto=sequence_type_proto)
        sequence_val_info_prim = helper.make_tensor_sequence_value_info(
            name='test', elem_type=2, shape=None)

        self.assertEqual(sequence_val_info, sequence_val_info_prim)
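The same equivalence as a self-contained snippet (again a minimal sketch assuming only onnx; TensorProto.UINT8 corresponds to elem_type=2):

from onnx import TensorProto, helper

# Long-hand: tensor type -> sequence type -> ValueInfoProto.
tensor_type = helper.make_tensor_type_proto(elem_type=TensorProto.UINT8, shape=None)
sequence_type = helper.make_sequence_type_proto(tensor_type)
long_hand = helper.make_value_info(name='test', type_proto=sequence_type)

# Shortcut: one call that produces the same proto.
shortcut = helper.make_tensor_sequence_value_info(name='test',
                                                  elem_type=TensorProto.UINT8,
                                                  shape=None)

assert long_hand == shortcut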
Example #3
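A shared, parameterized helper for exercising the ONNX version converter on a single op.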
    def _test_op_upgrade(
        self,
        op: Text,
        from_opset: int,
        input_shapes: List[Union[List[Optional[int]], Text]] = [[3, 4, 5]],
        output_shapes: List[List[Optional[int]]] = [[3, 4, 5]],
        input_types: Optional[List[Any]] = None,
        output_types: Optional[List[Any]] = None,
        initializer: List[Any] = [],
        attrs: Dict[Text, Any] = {},
        seq_inputs: List[int] = [],
        seq_outputs: List[int] = [],
        optional_inputs: List[int] = [],
        optional_outputs: List[int] = []
    ) -> None:
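        """Build a one-node model for `op` at `from_opset`, convert it to
        `latest_opset` with the version converter, and validate both models
        with the ONNX checker and strict shape inference."""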
        global tested_ops
        tested_ops.append(op)

        n_inputs = len(input_shapes)
        letters = list(string.ascii_lowercase)[:n_inputs]
        input_names = [
            letter if shape != '' else '' for (letter, shape) in zip(letters, input_shapes)
        ]
        if input_types is None:
            input_types = [TensorProto.FLOAT] * n_inputs
        is_sequence = [1 if i in seq_inputs else 0 for i in range(n_inputs)]
        is_optional = [1 if i in optional_inputs else 0 for i in range(n_inputs)]
        # turn empty strings into [0] to ease type analysis, even though those entries
        # will be ignored
        input_shapes_cast = cast(List[List[int]],
                [[0] if isinstance(shape, str) else shape for shape in input_shapes]
        )
        inputs: List[ValueInfoProto] = []
        for (name, ttype, shape, is_seq, is_opt) in \
                zip(input_names, input_types, input_shapes_cast, is_sequence, is_optional):
            if name != '':
                if is_seq:
                    inputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
                elif is_opt:
                    type_proto = helper.make_tensor_type_proto(ttype, shape)
                    optional_type_proto = helper.make_optional_type_proto(type_proto)
                    inputs += [helper.make_value_info(name, optional_type_proto)]
                else:
                    inputs += [helper.make_tensor_value_info(name, ttype, shape)]

        n_outputs = len(output_shapes)
        output_names = list(string.ascii_lowercase)[n_inputs:n_inputs + n_outputs]
        if output_types is None:
            output_types = [TensorProto.FLOAT] * n_outputs
        is_sequence = [1 if i in seq_outputs else 0 for i in range(n_outputs)]
        is_optional = [1 if i in optional_outputs else 0 for i in range(n_outputs)]
        output_shapes_cast = cast(List[List[int]],
                [[0] if isinstance(shape, str) else shape for shape in output_shapes]
        )
        outputs: List[ValueInfoProto] = []
        for (name, ttype, shape, is_seq, is_opt) in \
                zip(output_names, output_types, output_shapes_cast, is_sequence, is_optional):
            if is_seq:
                outputs += [helper.make_tensor_sequence_value_info(name, ttype, shape)]
            elif is_opt:
                type_proto = helper.make_tensor_type_proto(ttype, shape)
                optional_type_proto = helper.make_optional_type_proto(type_proto)
                outputs += [helper.make_value_info(name, optional_type_proto)]
            else:
                outputs += [helper.make_tensor_value_info(name, ttype, shape)]

        node = helper.make_node(op, input_names, output_names, **attrs)
        graph = helper.make_graph([node], op, inputs, outputs, initializer)
        original = helper.make_model(
            graph,
            producer_name='test',
            opset_imports=[helper.make_opsetid('', from_opset)]
        )
        onnx.checker.check_model(original)
        shape_inference.infer_shapes(original, strict_mode=True)

        converted = version_converter.convert_version(original, latest_opset)
        onnx.checker.check_model(converted)
        shape_inference.infer_shapes(converted, strict_mode=True)
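Typical calls look like the following (op and opset choices here are hypothetical, shown for illustration only):

        self._test_op_upgrade('Relu', 6)   # one float [3, 4, 5] input/output by default
        self._test_op_upgrade('Cos', 7)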
Example #4
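Exports a small embedding + linear model with torch.onnx, manually rewrites the exported Gather node into an ATen embedding op, then checks onnxruntime output against PyTorch both with and without IO binding.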
    def test_aten_embedding(self):
        class NeuralNetEmbedding(torch.nn.Module):
            def __init__(self, num_embeddings, embedding_dim, hidden_size):
                super().__init__()
                self.embedding = torch.nn.Embedding(num_embeddings,
                                                    embedding_dim)
                self.linear = torch.nn.Linear(embedding_dim, hidden_size)

            def forward(self, input):
                embedding_result = self.embedding(input)
                return embedding_result, self.linear(embedding_result)

        N, num_embeddings, embedding_dim, hidden_size = 64, 32, 128, 128
        model = NeuralNetEmbedding(num_embeddings, embedding_dim, hidden_size)

        with torch.no_grad():
            x = torch.randint(high=num_embeddings,
                              size=(N, ),
                              dtype=torch.int64)
            dynamic_axes = {
                "x": {
                    0: "x_dim0"
                },
                "y": {
                    0: "y_dim0",
                    1: "y_dim1"
                }
            }

            f = io.BytesIO()

            export(
                model,
                x,
                f=f,
                input_names=["x"],
                output_names=["y"],
                dynamic_axes=dynamic_axes,
                opset_version=14,
            )

            exported_model = onnx.load_model_from_string(f.getvalue())

            # The PyTorch exporter's ATen-op emission is still under development,
            # so the exported Gather node is rewritten into an ATen embedding op
            # manually for testing.
            for node in exported_model.graph.node:
                if node.op_type == "Gather":
                    node.domain = "org.pytorch.aten"
                    node.op_type = "ATen"
                    attr = node.attribute.add()
                    attr.name = "operator"
                    attr.type = 3  # AttributeProto.AttributeType.STRING
                    attr.s = "embedding".encode()
                    exported_model.graph.node.append(
                        helper.make_node(
                            "Constant",
                            [],
                            ["padding_idx"],
                            value=helper.make_tensor("padding_idx",
                                                     TensorProto.INT64, (),
                                                     [-1]),
                        ))
                    exported_model.graph.node.append(
                        helper.make_node(
                            "Constant",
                            [],
                            ["scale_grad_by_freq"],
                            value=helper.make_tensor("scale_grad_by_freq",
                                                     TensorProto.BOOL, (),
                                                     [False]),
                        ))
                    exported_model.graph.node.append(
                        helper.make_node(
                            "Constant",
                            [],
                            ["sparse"],
                            value=helper.make_tensor("sparse",
                                                     TensorProto.BOOL, (),
                                                     [False]),
                        ))
                    node.input.append("padding_idx")
                    node.input.append("scale_grad_by_freq")
                    node.input.append("sparse")
                    exported_model.graph.value_info.append(
                        helper.make_value_info(
                            name=node.output[0],
                            type_proto=helper.make_tensor_type_proto(
                                elem_type=TensorProto.FLOAT,
                                shape=[
                                    node.output[0] + "_dim0",
                                    node.output[0] + "_dim1"
                                ]),
                        ))
                    break

        # The exported graph must now contain an ATen op.
        assert any(node.op_type == "ATen"
                   for node in exported_model.graph.node)

        init_aten_op_executor()

        # Run w/o IO binding.
        for _ in range(8):
            x = torch.randint(high=num_embeddings,
                              size=(N, ),
                              dtype=torch.int64)
            pt_y1, pt_y2 = model(x)
            session = ort.InferenceSession(exported_model.SerializeToString(),
                                           providers=["CPUExecutionProvider"])
            ort_y1, ort_y2 = session.run([], {"x": x.numpy()})
            np.testing.assert_almost_equal(ort_y1, pt_y1.detach().numpy())
            np.testing.assert_almost_equal(ort_y2, pt_y2.detach().numpy())

        # Run w/ IO binding.
        for _ in range(8):
            x = torch.randint(high=num_embeddings,
                              size=(N, ),
                              dtype=torch.int64)
            ort_x = ort.OrtValue.ortvalue_from_numpy(x.detach().numpy(), "cpu")
            pt_y1, pt_y2 = model(x)
            np_y1 = np.zeros(tuple(pt_y1.size()), dtype=np.float32)
            np_y2 = np.zeros(tuple(pt_y2.size()), dtype=np.float32)
            ort_y1 = ort.OrtValue.ortvalue_from_numpy(np_y1, "cpu")
            ort_y2 = ort.OrtValue.ortvalue_from_numpy(np_y2, "cpu")
            session = ort.InferenceSession(exported_model.SerializeToString(),
                                           providers=["CPUExecutionProvider"])
            io_binding = session.io_binding()
            io_binding.bind_ortvalue_input(exported_model.graph.input[0].name,
                                           ort_x)
            io_binding.bind_ortvalue_output(
                exported_model.graph.output[0].name, ort_y1)
            io_binding.bind_ortvalue_output(
                exported_model.graph.output[1].name, ort_y2)
            session.run_with_iobinding(io_binding)
            np.testing.assert_almost_equal(np_y1, pt_y1.detach().numpy())
            np.testing.assert_almost_equal(np_y2, pt_y2.detach().numpy())