Example #1
    def test_symbolic_shape_infer(self):
        cwd = os.getcwd()
        test_model_dir = os.path.join(cwd, '..', 'models')
        for filename in Path(test_model_dir).rglob('*.onnx'):
            if filename.name.startswith('.'):
                continue  # skip hidden metadata files that are not valid models
            print("Running symbolic shape inference on: " + str(filename))
            SymbolicShapeInference.infer_shapes(in_mp=onnx.load(str(filename)),
                                                auto_merge=True,
                                                int_max=100000,
                                                guess_output_rank=True)
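Outside the test harness, the same API can be used directly. A minimal sketch, assuming the in-memory variant of infer_shapes shown above (it takes a ModelProto via in_mp and returns the inferred ModelProto, as the later test examples confirm); the file paths are placeholders:

import onnx
from symbolic_shape_infer import SymbolicShapeInference

# Load a model, infer symbolic shapes, and save the result.
# 'model.onnx' and 'model_inferred.onnx' are placeholder paths.
model = onnx.load('model.onnx')
inferred = SymbolicShapeInference.infer_shapes(in_mp=model, auto_merge=True)
onnx.save(inferred, 'model_inferred.onnx')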
Example #2
    def test_mismatched_types(self):
        graph = helper.make_graph(
            [
                helper.make_node(
                    "If", ["x"], ["out"],
                    name="if_node",
                    then_branch=helper.make_graph(
                        [
                            helper.make_node(
                                "Constant",
                                [],
                                ["one_float"],
                                value=helper.make_tensor(
                                    "one_float_value", TensorProto.FLOAT, [],
                                    [1]),
                            )
                        ],
                        "then",
                        [],
                        [
                            helper.make_tensor_value_info(
                                "one_float", TensorProto.FLOAT, [])
                        ],
                    ),
                    else_branch=helper.make_graph(
                        [
                            helper.make_node(
                                "Constant",
                                [],
                                ["one_double"],
                                value=helper.make_tensor(
                                    "one_double", TensorProto.DOUBLE, [], [1]),
                            )
                        ],
                        "else",
                        [],
                        [
                            helper.make_tensor_value_info(
                                "one_double", TensorProto.DOUBLE, [])
                        ],
                    ))
            ],
            "graph",
            [helper.make_tensor_value_info("x", TensorProto.BOOL, [])],
            [helper.make_tensor_value_info("out", TensorProto.FLOAT, [])],
        )
        model = helper.make_model(graph, producer_name="test_mismatched_types")

        with self.assertRaisesRegex(ValueError, r"if_node.*FLOAT.*DOUBLE"):
            SymbolicShapeInference.infer_shapes(model, auto_merge=True)
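This test leans on infer_shapes validating that both branches of an If node produce the same element type; judging by the regex above, the ValueError message names the offending node and both types. Note that auto_merge=True merges conflicting symbolic dimensions, but as this test shows, it does not paper over dtype mismatches.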
Example #3
    def _test_einsum_two_inputs_impl(self, input_0_shape, input_1_shape,
                                     output_0_shape, eqn):
        nodes = [
            helper.make_node("Einsum", ["input_0", "input_1"], ["output_0"],
                             "einsum_0",
                             equation=eqn),
        ]
        inputs = [
            helper.make_tensor_value_info('input_0', TensorProto.FLOAT,
                                          input_0_shape),
            helper.make_tensor_value_info('input_1', TensorProto.FLOAT,
                                          input_1_shape),
        ]
        outputs = [
            helper.make_tensor_value_info('output_0', TensorProto.FLOAT, None),
        ]
        graph = helper.make_graph(nodes, "Einsum_Test", inputs, outputs, [])
        model = helper.make_model(graph)

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info('output_0', TensorProto.FLOAT,
                                          output_0_shape)
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
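A hypothetical call to this helper (the shapes and equation are illustrative, not taken from the original suite), checking that a batched-matmul equation propagates symbolic dims:

self._test_einsum_two_inputs_impl(['b', 's', 'k'], ['k', 'n'],
                                  ['b', 's', 'n'], 'bsk,kn->bsn')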
Example #4
    def test_softmax_cross_entropy_loss(self):
        hidden_size = 1024

        nodes = [
            helper.make_node("SoftmaxCrossEntropyLoss",
                             inputs=["logits", "labels"],
                             outputs=["loss"]),
        ]

        inputs = [
            helper.make_tensor_value_info('logits', TensorProto.FLOAT,
                                          ['b', 's', hidden_size]),
            helper.make_tensor_value_info('labels', TensorProto.INT32,
                                          ['b', 's']),
        ]

        outputs = [
            helper.make_tensor_value_info('loss', TensorProto.FLOAT, None),
        ]

        graph = helper.make_graph(nodes, "SoftmaxCrossEntropyLoss_Test",
                                  inputs, outputs, [])
        model = helper.make_model(graph)

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info('loss', TensorProto.FLOAT, [])
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
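The scalar [] loss shape above follows from SoftmaxCrossEntropyLoss defaulting to reduction='mean'. Per the ONNX op spec, a variant with reduction='none' should instead infer the per-sample loss shape ['b', 's']; a sketch of that node:

helper.make_node("SoftmaxCrossEntropyLoss",
                 inputs=["logits", "labels"],
                 outputs=["loss"],
                 reduction="none")  # per-sample loss, no aggregation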
Example #5
    def test_gather_indices(self):
        graph = helper.make_graph(
            [
                helper.make_node(
                    "Constant",
                    [],
                    ["data"],
                    "constant",
                    value=helper.make_tensor("input", TensorProto.FLOAT, [5], [0.0, 1.0, 2.0, 3.0, 4.0]),
                ),
                helper.make_node("Gather", ["data", "indices"], ["output"], axis=0),
            ],
            "Gather_Test",
            [
                helper.make_tensor_value_info("indices", TensorProto.INT64, ["b"]),
            ],
            [
                helper.make_tensor_value_info("output", TensorProto.FLOAT, ["b"]),
            ],
        )
        model = helper.make_model(graph, producer_name="Gather_Test_Model")
        model.opset_import[0].version = 13

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info("data", TensorProto.FLOAT, [5]),
            helper.make_tensor_value_info("output", TensorProto.FLOAT, ["b"]),
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
Example #6
    def test_unsqueeze_opset_13(self):
        graph = helper.make_graph(
            [
                helper.make_node("Unsqueeze", ["input", "axes"], ["temp"]),
                helper.make_node("Identity", ["temp"], ["output"]),
            ],
            "Unsqueeze_Test",
            [
                helper.make_tensor_value_info("input", TensorProto.FLOAT, ["b", "s"]),
            ],
            [
                helper.make_tensor_value_info("output", TensorProto.FLOAT, ["b", "s", 1]),
            ],
            [
                helper.make_tensor("axes", TensorProto.INT64, [1], [-1]),
            ],
        )
        model = helper.make_model(graph, producer_name="Unsqueeze_Test_Model")
        model.opset_import[0].version = 13

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info("temp", TensorProto.FLOAT, ["b", "s", 1]),
            helper.make_tensor_value_info("output", TensorProto.FLOAT, ["b", "s", 1]),
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
Example #7
    def check_slice_of_concat(self, input_dims, start, end, step, expected_output_dim):
        _dimstrmap = {dim: f"dim{i}" for i, dim in enumerate(input_dims)}

        def dimstrmap(dim):
            return _dimstrmap.get(dim, dim)

        def get_initializer(name):
            valuemap = {"zero": 0, "one": 1, "two": 2, "ten": 10, "intmax": 2**32}
            value = -valuemap[name[4:]] if name.startswith("neg_") else valuemap[name]
            return onnx.helper.make_tensor(name, TensorProto.INT64, [1], [value])

        initializers = [
            get_initializer(name)
            for name in [
                "zero",
                "one",
                "two",
                "ten",
                "intmax",
                "neg_intmax",
                "neg_one",
                "neg_ten",
            ]
        ]
        inputs = []
        nodes = []
        for i, dim in enumerate(input_dims):
            inputs.append(onnx.helper.make_tensor_value_info(f"t{i}", TensorProto.FLOAT, ["B", dim]))
            nodes.extend(
                [
                    onnx.helper.make_node("Shape", [f"t{i}"], [f"shape{i}"]),
                    onnx.helper.make_node("Slice", [f"shape{i}", "one", "two", "zero", "one"], [f"dim{i}"]),
                    onnx.helper.make_node("Neg", [f"dim{i}"], [f"neg_dim{i}"]),
                ]
            )

        def make_concat_dims(concat_name, dims):
            dims = [f"neg_{dimstrmap(dim[1:])}" if dim.startswith("-") else dimstrmap(dim) for dim in dims]
            return onnx.helper.make_node("Concat", dims, [concat_name], axis=0)

        nodes.extend(
            [
                onnx.helper.make_node("Concat", [inp.name for inp in inputs], ["concat"], axis=1),
                make_concat_dims("starts", ["zero", start]),
                make_concat_dims("ends", ["intmax", end]),
                make_concat_dims("axes", ["zero", "one"]),
                make_concat_dims("steps", ["one", step]),
                onnx.helper.make_node("Slice", ["concat", "starts", "ends", "axes", "steps"], ["output"]),
            ]
        )
        output = onnx.helper.make_tensor_value_info("output", TensorProto.FLOAT, ["d1", "d2"])
        graph_def = onnx.helper.make_graph(nodes, "graph", inputs, [output], initializer=initializers)
        model = SymbolicShapeInference.infer_shapes(onnx.helper.make_model(graph_def))
        output = unique_element(model.graph.output)
        shape = [d.dim_param if d.dim_param else d.dim_value for d in output.type.tensor_type.shape.dim]
        self.assertEqual(shape, ["B", expected_output_dim])
Example #8
    def test_embed_layer_norm(self):
        hidden_size = 32
        initializers = [
            helper.make_tensor('word_embedding', TensorProto.FLOAT,
                               [100, hidden_size],
                               [1.0] * (100 * hidden_size)),
            helper.make_tensor('position_embedding', TensorProto.FLOAT,
                               [20, hidden_size], [1.0] * (20 * hidden_size)),
            helper.make_tensor('segment_embedding', TensorProto.FLOAT,
                               [2, hidden_size], [1.0] * (2 * hidden_size)),
            helper.make_tensor('gamma', TensorProto.FLOAT, [hidden_size],
                               [1.0] * hidden_size),
            helper.make_tensor('beta', TensorProto.FLOAT, [hidden_size],
                               [1.0] * hidden_size)
        ]

        nodes = [
            helper.make_node("EmbedLayerNormalization",
                             inputs=[
                                 "input_ids", "segment_ids", "word_embedding",
                                 "position_embedding", "segment_embedding",
                                 "gamma", "beta"
                             ],
                             outputs=["output", "mask_index"],
                             domain="com.microsoft"),
        ]

        inputs = [
            helper.make_tensor_value_info('input_ids', TensorProto.INT32,
                                          ['b', 's']),
            helper.make_tensor_value_info('segment_ids', TensorProto.INT32,
                                          ['b', 's']),
        ]

        outputs = [
            helper.make_tensor_value_info('output', TensorProto.FLOAT, None),
            helper.make_tensor_value_info('mask_index', TensorProto.INT32,
                                          None),
        ]

        graph = helper.make_graph(nodes, "Unsqueeze_Test", inputs, outputs,
                                  initializers)
        model = helper.make_model(graph)

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info('output', TensorProto.FLOAT,
                                          ['b', 's', hidden_size]),
            helper.make_tensor_value_info('mask_index', TensorProto.INT32,
                                          ['b'])
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
Example #9
    def test_unsqueeze_opset_11(self):
        graph = helper.make_graph([
            helper.make_node("Unsqueeze", ["input"], ["temp"], axes=[0]),
            helper.make_node("Identity", ["temp"], ["output"]),
        ], "Unsqueeze_Test", [
            helper.make_tensor_value_info('input', TensorProto.FLOAT, ['b', 's']),
        ], [
            helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 'b', 's']),
        ])
        model = helper.make_model(graph, producer_name='Unsqueeze_Test_Model')
        model.opset_import[0].version = 11

        inferred = SymbolicShapeInference.infer_shapes(model, auto_merge=True)
        expected_shapes = [
            helper.make_tensor_value_info('temp', TensorProto.FLOAT, [1, 'b', 's']),
            helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 'b', 's'])
        ]
        self._check_shapes(graph, inferred.graph, expected_shapes)
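Note the contrast with test_unsqueeze_opset_13 above: opset 13 moved Unsqueeze's axes from a node attribute to a second input tensor, which is why that test supplies axes as an initializer while this one passes axes=[0] directly on the node.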
Example #10
    onnx.save(mp, output_model)


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode',
                        help='The modification mode',
                        choices=['to_scan', 'remove_initializers_from_inputs'])
    parser.add_argument('--input', help='The input model file', default=None)
    parser.add_argument('--output', help='The output model file', default=None)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_arguments()
    print('input model: ' + args.input)
    print('output model: ' + args.output)
    if args.mode == 'to_scan':
        print('Convert LSTM to Scan...')
        convert_to_scan_model(args.input, args.output)
    elif args.mode == 'remove_initializers_from_inputs':
        print(
            'Remove all initializers from input for model with IR version >= 4...'
        )
        remove_initializers_from_inputs(args.input, args.output)
    else:
        raise NotImplementedError('Unknown mode')
    print('Running symbolic shape inference on output model')
    SymbolicShapeInference.infer_shapes(args.output, args.output)
    print('Done!')
Example #11
def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode',
                        help='The modification mode',
                        choices=['to_scan', 'remove_initializers_from_inputs'])
    parser.add_argument('--input', help='The input model file', default=None)
    parser.add_argument('--output', help='The output model file', default=None)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_arguments()
    print('input model: ' + args.input)
    print('output model: ' + args.output)
    if args.mode == 'to_scan':
        print('Convert LSTM/GRU/RNN to Scan...')
        convert_to_scan_model(args.input, args.output)
    elif args.mode == 'remove_initializers_from_inputs':
        print(
            'Remove all initializers from input for model with IR version >= 4...'
        )
        remove_initializers_from_inputs(args.input, args.output)
    else:
        raise NotImplementedError('Unknown mode')
    print('Running symbolic shape inference on output model')
    SymbolicShapeInference.infer_shapes(args.output,
                                        args.output,
                                        auto_merge=True)
    print('Done!')
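Note that Examples #10 and #11 call infer_shapes with input and output file paths, while the test examples above pass a ModelProto and use the returned model; this presumably reflects two revisions of symbolic_shape_infer.py, so check which signature your copy of the script exposes.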
Example #12
def perf_test(rnn_type,
              num_threads,
              input_dim,
              hidden_dim,
              bidirectional,
              layers,
              seq_len,
              batch_size,
              top_n=5,
              min_duration_seconds=10):
    set_num_threads(num_threads)

    model_name = '{}_i{}_h{}_{}_l{}_{}.onnx'.format(
        rnn_type, input_dim, hidden_dim, 'bi' if bidirectional else '', layers,
        'batched' if batch_size > 1 else 'no_batch')

    generate_model(rnn_type, input_dim, hidden_dim, bidirectional, layers,
                   model_name, batch_size == 1)
    feeds = {
        'input': np.random.rand(seq_len, batch_size,
                                input_dim).astype(np.float32)
    }

    # run original model
    sess = onnxruntime.InferenceSession(model_name)
    count, duration, per_iter_cost = perf_run(
        sess,
        feeds,
        min_counts=top_n,
        min_duration_seconds=min_duration_seconds)
    avg_rnn = top_n_avg(per_iter_cost, top_n)
    print('perf_rnn {}: run for {} iterations, top {} avg {} ms'.format(
        model_name, count, top_n, avg_rnn))

    # run Scan model converted from original
    from model_editor import convert_to_scan_model
    from symbolic_shape_infer import SymbolicShapeInference
    scan_model_name = os.path.splitext(model_name)[0] + '_scan.onnx'
    convert_to_scan_model(model_name, scan_model_name)
    # note that symbolic shape inference is needed because model has symbolic batch dim, thus init_state is ConstantOfShape
    SymbolicShapeInference.infer_shapes(scan_model_name, scan_model_name)
    sess = onnxruntime.InferenceSession(scan_model_name)
    count, duration, per_iter_cost = perf_run(
        sess,
        feeds,
        min_counts=top_n,
        min_duration_seconds=min_duration_seconds)
    avg_scan = top_n_avg(per_iter_cost, top_n)
    print('perf_scan {}: run for {} iterations, top {} avg {} ms'.format(
        scan_model_name, count, top_n, avg_scan))

    # quantize Scan model to int8
    from model_quantizer import convert_matmul_model
    int8_model_name = os.path.splitext(model_name)[0] + '_int8.onnx'
    convert_matmul_model(scan_model_name, int8_model_name)
    SymbolicShapeInference.infer_shapes(int8_model_name, int8_model_name)
    sess = onnxruntime.InferenceSession(int8_model_name)
    count, duration, per_iter_cost = perf_run(
        sess,
        feeds,
        min_counts=top_n,
        min_duration_seconds=min_duration_seconds)
    avg_int8 = top_n_avg(per_iter_cost, top_n)
    print('perf_int8 {}: run for {} iterations, top {} avg {} ms'.format(
        int8_model_name, count, top_n, avg_int8))

    return avg_rnn, avg_scan, avg_int8
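A hypothetical driver call (the parameter values are illustrative, and the accepted rnn_type strings depend on generate_model):

avg_rnn, avg_scan, avg_int8 = perf_test('lstm',       # placeholder rnn_type
                                        num_threads=1,
                                        input_dim=128,
                                        hidden_dim=256,
                                        bidirectional=False,
                                        layers=1,
                                        seq_len=32,
                                        batch_size=1)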