Example #1
  def test_if_with_sequence(self):
    # S = [a]
    # if cond is True
    #   S = [a,b]
    # else
    #   S = [a,c]
    a = np.random.randn(2, 1, 2).astype(np.float32)
    b = np.random.randn(1, 1, 2).astype(np.float32)
    c = np.random.randn(3, 1, 2).astype(np.float32)
    seq_construct_node = helper.make_node('SequenceConstruct', ['a'], ['S'])
    seq_insert_node1 = helper.make_node('SequenceInsert', ['S', 'b'], ['Sb'])
    seq_insert_node2 = helper.make_node('SequenceInsert', ['S', 'c'], ['Sc'])

    a_in = helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT, [2, 1, 2])
    b_in = helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT, [1, 1, 2])
    c_in = helper.make_tensor_value_info('c', onnx.TensorProto.FLOAT, [3, 1, 2])
    cond_in = helper.make_tensor_value_info('cond', TensorProto.BOOL, [])
    s_in = helper.make_sequence_value_info('S', TensorProto.FLOAT,
                                           [None, None, None, None])

    sb_out = helper.make_sequence_value_info('Sb', TensorProto.FLOAT,
                                             [None, None, None, None])
    sc_out = helper.make_sequence_value_info('Sc', TensorProto.FLOAT,
                                             [None, None, None, None])
    s_final_out = helper.make_sequence_value_info('S_final', TensorProto.FLOAT,
                                                  [None, None, None, None])

    then_graph = helper.make_graph(nodes=[seq_insert_node1],
                                   name="then_graph",
                                   inputs=[s_in, b_in],
                                   outputs=[sb_out])
    else_graph = helper.make_graph(nodes=[seq_insert_node2],
                                   name="else_graph",
                                   inputs=[s_in, c_in],
                                   outputs=[sc_out])
    if_node = helper.make_node('If', ['cond'], ['S_final'],
                               then_branch=then_graph,
                               else_branch=else_graph)

    graph_def = helper.make_graph(nodes=[seq_construct_node, if_node],
                                  name='test_if',
                                  inputs=[a_in, b_in, c_in, cond_in],
                                  outputs=[s_final_out])
    tf_rep = prepare(helper.make_model(graph_def))
    output = tf_rep.run({
        'a': a,
        'b': b,
        'c': c,
        'cond': np.array(True, dtype=np.bool_)
    })
    np.testing.assert_almost_equal(output['S_final'].values[:2], a)
    np.testing.assert_almost_equal(output['S_final'].values[2:], b)
    output = tf_rep.run({
        'a': a,
        'b': b,
        'c': c,
        'cond': np.array(False, dtype=np.bool_)
    })
    np.testing.assert_almost_equal(output['S_final'].values[:2], a)
    np.testing.assert_almost_equal(output['S_final'].values[2:], c)
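In plain Python, the If graph above computes the following (a hedged restatement, not part of the original test):

# Restatement: start from S = [a]; the then-branch appends b, the
# else-branch appends c, matching the assertions on S_final above.
import numpy as np

a = np.random.randn(2, 1, 2).astype(np.float32)
b = np.random.randn(1, 1, 2).astype(np.float32)
c = np.random.randn(3, 1, 2).astype(np.float32)

def final_sequence(cond):
    S = [a]
    return S + [b] if cond else S + [c]

assert final_sequence(True)[1] is b
assert final_sequence(False)[1] is c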
Example #2

        def expect(node, inputs, outputs, name):
            ginputs = [
                make_sequence_value_info(node.input[0], TensorProto.FLOAT, []),  # pylint: disable=E1101,
                make_sequence_value_info(node.input[1], TensorProto.FLOAT, []),  # pylint: disable=E1101,
            ]
            if len(node.input) > 2:
                ginputs.append(
                    make_tensor_value_info(node.input[2], TensorProto.INT64,
                                           []),  # pylint: disable=E1101
                )
            goutputs = [
                make_sequence_value_info(node.output[0], TensorProto.FLOAT,
                                         []),  # pylint: disable=E1101,
            ]
            model_def = make_model(
                opset_imports=[make_operatorsetid('', TARGET_OPSET)],
                graph=make_graph(name=name,
                                 inputs=ginputs,
                                 outputs=goutputs,
                                 nodes=[node]))
            oinf = OnnxInference(model_def)
            got = oinf.run({n: v for n, v in zip(node.input, inputs)})
            self.assertEqual(len(got), 1)
            oseq = got['output_sequence']
            self.assertEqual(len(oseq), len(outputs))
            for e, g in zip(outputs, oseq):
                self.assertEqualArray(e, g)

            del model_def.opset_import[:]  # pylint: disable=E1101
            op_set = model_def.opset_import.add()  # pylint: disable=E1101
            op_set.domain = ''
            op_set.version = 15
            model_def.ir_version = 8
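The snippet above cuts off after expect (the opset patching at the end belongs to the surrounding test), but its calling convention is visible from the body: inputs are matched to node.input by position and the result is read from an output named output_sequence. A hedged sketch of such a call from inside the surrounding test method; the node and data below are illustrative, not taken from the original:

import numpy as np
from onnx.helper import make_node

seq = [np.random.randn(2, 3).astype(np.float32) for _ in range(3)]
tensor = np.random.randn(2, 3).astype(np.float32)
# The single output must be named 'output_sequence', since expect() reads
# got['output_sequence'].
node = make_node('SequenceInsert', inputs=['input_sequence', 'tensor'],
                 outputs=['output_sequence'])
# SequenceInsert without a position appends at the end of the sequence.
expect(node, inputs=[seq, tensor], outputs=seq + [tensor],
       name='test_sequence_insert')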
Example #3
    def test_loop_with_sequence(self):
        if legacy_opset_pre_ver(14):
            raise unittest.SkipTest(
                "ONNX version {} doesn't support helper.make_tensor_sequence_value_info."
                .format(defs.onnx_opset_version()))

        # construct sequence S with tensor a in it
        # insert tensor b into sequence S three times
        a = np.random.randn(2, 1, 2).astype(np.float32)
        b = np.random.randn(1, 1, 2).astype(np.float32)
        M = np.array(3, dtype=np.int64)
        cond = np.array(True, dtype=np.bool_)
        seq_construct_node = helper.make_node('SequenceConstruct', ['a'],
                                              ['S'])
        seq_insert_node = helper.make_node('SequenceInsert', ['S', 'b'],
                                           ['Updated_S'])

        a_in = helper.make_tensor_value_info('a', onnx.TensorProto.FLOAT,
                                             [2, 1, 2])
        b_in = helper.make_tensor_value_info('b', onnx.TensorProto.FLOAT,
                                             [1, 1, 2])
        M_in = helper.make_tensor_value_info('M', TensorProto.INT64, [])
        cond_init_in = helper.make_tensor_value_info('cond_init',
                                                     TensorProto.BOOL, [])
        iter_count_in = helper.make_tensor_value_info('iter_count',
                                                      TensorProto.INT64, [])
        cond_in = helper.make_tensor_value_info('cond', TensorProto.BOOL, [])
        s_in = helper.make_sequence_value_info('S', TensorProto.FLOAT,
                                               [None, None, None, None])

        cond_out = helper.make_tensor_value_info('cond', TensorProto.BOOL, [])
        s_out = helper.make_sequence_value_info('Updated_S', TensorProto.FLOAT,
                                                [None, None, None, None])
        s_final_out = helper.make_sequence_value_info('S_final',
                                                      TensorProto.FLOAT,
                                                      [None, None, None, None])

        body_graph = helper.make_graph(nodes=[seq_insert_node],
                                       name="for_loop_graph",
                                       inputs=[iter_count_in, cond_in, s_in],
                                       outputs=[cond_out, s_out])
        loop_node = helper.make_node('Loop', ['M', '', 'S'], ['S_final'],
                                     body=body_graph)

        graph_def = helper.make_graph(nodes=[seq_construct_node, loop_node],
                                      name='test_loop',
                                      inputs=[a_in, b_in, M_in, cond_init_in],
                                      outputs=[s_final_out])
        tf_rep = prepare(helper.make_model(graph_def))
        output = tf_rep.run({'a': a, 'b': b, 'M': M, 'cond_init': cond})
        np.testing.assert_almost_equal(output['S_final'][0], a)
        np.testing.assert_almost_equal(output['S_final'][1], b)
        np.testing.assert_almost_equal(output['S_final'][2], b)
        np.testing.assert_almost_equal(output['S_final'][3], b)
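Restated in plain Python (a sketch with the same shapes as the test, not part of the original), the Loop graph starts from S = [a] and appends b on each of the M = 3 iterations:

import numpy as np

a = np.random.randn(2, 1, 2).astype(np.float32)
b = np.random.randn(1, 1, 2).astype(np.float32)
M = 3

S = [a]
for _ in range(M):
    S = S + [b]  # SequenceInsert without a position appends at the end

# S is now [a, b, b, b], matching the four assertions on S_final above.
assert len(S) == 4 and S[0] is a and all(t is b for t in S[1:])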
Example #4
def make_onnx_inputs_outputs(name, elem_type, shape, **kwargs):
    """Wrapper for creating onnx graph inputs or outputs
       name,  # type: Text
       elem_type,  # type: TensorProto.DataType
       shape,  # type: Optional[Sequence[int]]
    """
    if elem_type is None:
        elem_type = onnx_pb.TensorProto.UNDEFINED
    elif isinstance(elem_type, SeqType):
        return helper.make_sequence_value_info(name, elem_type.dtype,
                                               make_onnx_shape(shape),
                                               **kwargs)
    return helper.make_tensor_value_info(name, elem_type,
                                         make_onnx_shape(shape), **kwargs)
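A minimal usage sketch for the wrapper above. SeqType and make_onnx_shape come from the surrounding codebase and are not shown here, so the stand-ins below are assumptions, not the real implementations:

from onnx import TensorProto, helper  # the wrapper above also uses onnx.helper

class SeqType:
    # Assumed marker type: "this value is a sequence with this element dtype".
    def __init__(self, dtype):
        self.dtype = dtype

def make_onnx_shape(shape):
    # Assumed to pass the shape through; the real helper may rewrite dims.
    return shape

# A plain tensor input ...
x_info = make_onnx_inputs_outputs('x', TensorProto.FLOAT, [None, 3])
# ... and a sequence-of-float output.
seq_info = make_onnx_inputs_outputs('seq', SeqType(TensorProto.FLOAT), None)

print(x_info.type.WhichOneof('value'))    # tensor_type
print(seq_info.type.WhichOneof('value'))  # sequence_type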
Example #5
def create_batch_output_sequence(model: onnx.ModelProto,
                                 output_name: str = 'output',
                                 output_prefix: str = 'output_'):
    """
    """
    outputs = get_outputs(model)
    batch_outputs = list(
        filter(lambda x: x.name.startswith(output_prefix), outputs))
    print(batch_outputs)
    get_node_name = lambda x: x.name
    batch_output_names = list(map(get_node_name, batch_outputs))
    ## squeeze outputs before constructing the sequence
    ## TODO: read dim first
    # squeezed_output_nodes = []
    # squeezed_output_names = []
    # for output in batch_outputs :
    #     squeezed_output_name = 'squeezed_{}'.format(output.name)
    #     squeezed_output_nodes.append(helper.make_node(
    #         'Squeeze',
    #         inputs=[output.name], axes=[0],
    #         outputs=[squeezed_output_name],
    #     ))
    #     squeezed_output_names.append(squeezed_output_name)
    ## create sequence info and node
    sequence_value_info = helper.make_sequence_value_info(
        name=output_name,
        shape=None,
        elem_type=onnx.TensorProto.FLOAT,
    )
    # sequence_construct_node = helper.make_node(
    #     'SequenceConstruct',
    #     inputs=squeezed_output_names,
    #     outputs=[output_name]
    # )
    sequence_construct_node = helper.make_node('SequenceConstruct',
                                               inputs=batch_output_names,
                                               outputs=[output_name])
    # for node in squeezed_output_nodes :
    #     model.graph.node.append(node)
    model.graph.node.append(sequence_construct_node)
    model.graph.output.append(sequence_value_info)
    return model
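A hedged usage sketch for create_batch_output_sequence on a toy model whose graph already exposes per-item outputs output_0 and output_1. get_outputs is not shown above; the stand-in here simply returns model.graph.output, which may differ from the real helper:

import onnx
from onnx import helper, TensorProto

def get_outputs(model):  # assumed stand-in for the real helper
    return list(model.graph.output)

x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 2])
o0 = helper.make_tensor_value_info('output_0', TensorProto.FLOAT, [1, 2])
o1 = helper.make_tensor_value_info('output_1', TensorProto.FLOAT, [1, 2])
n0 = helper.make_node('Identity', ['x'], ['output_0'])
n1 = helper.make_node('Neg', ['x'], ['output_1'])
model = helper.make_model(
    helper.make_graph([n0, n1], 'toy', [x], [o0, o1]))

model = create_batch_output_sequence(model, output_name='output')
print([o.name for o in model.graph.output])
# ['output_0', 'output_1', 'output'] -- the sequence output was appended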
Example #6

    def test_loop_additional_input(self):
        # Given a tensor x of values [x1, ..., xN],
        # Return a sequence of tensors of
        #   [[x1], [x1, x2], ..., [x1, ..., xN]]

        cond_in = make_tensor_value_info('cond_in', TensorProto.BOOL, [])  # pylint: disable=E1101
        cond_out = make_tensor_value_info('cond_out', TensorProto.BOOL, [])  # pylint: disable=E1101
        iter_count = make_tensor_value_info('iter_count', TensorProto.INT64,
                                            [])  # pylint: disable=E1101
        seq_in = make_tensor_sequence_value_info('seq_in', TensorProto.FLOAT,
                                                 None)  # pylint: disable=E1101
        seq_out = make_tensor_sequence_value_info('seq_out', TensorProto.FLOAT,
                                                  None)  # pylint: disable=E1101

        x = numpy.array([1, 2, 3, 4, 5]).astype(numpy.float32)

        x_const_node = make_node(
            'Constant',
            inputs=[],
            outputs=['x'],
            value=make_tensor(
                name='const_tensor_x',
                data_type=TensorProto.FLOAT,  # pylint: disable=E1101
                dims=x.shape,
                vals=x.flatten().astype(float)))

        zero_const_node = make_node(
            'Constant',
            inputs=[],
            outputs=['slice_start'],
            value=make_tensor(
                name='const_tensor_zero',
                data_type=TensorProto.INT64,  # pylint: disable=E1101
                dims=(1, ),
                vals=[0]))

        axes_node = make_node(
            'Constant',
            inputs=[],
            outputs=['axes'],
            value=make_tensor(
                name='const_tensor_axes',
                data_type=TensorProto.INT64,  # pylint: disable=E1101
                dims=(),
                vals=[0]))

        add_node = make_node('Add',
                             inputs=['iter_count', 'XI'],
                             outputs=['slice_end'])

        slice_node = make_node('Slice',
                               inputs=['x', 'slice_start', 'slice_end'],
                               outputs=['slice_out'])

        insert_node = make_node('SequenceInsert',
                                inputs=['seq_in', 'slice_out'],
                                outputs=['seq_out'])

        identity_node = make_node('Identity',
                                  inputs=['cond_in'],
                                  outputs=['cond_out'])

        loop_body = make_graph([
            identity_node, x_const_node, zero_const_node, add_node, axes_node,
            slice_node, insert_node
        ], 'loop_body', [iter_count, cond_in, seq_in], [cond_out, seq_out])

        node = make_node('Loop',
                         inputs=['trip_count', 'cond', 'seq_empty'],
                         outputs=['seq_res'],
                         body=loop_body)
        node1 = make_node('Neg', inputs=['XI'], outputs=['Y'])
        node_concat = make_node('ConcatFromSequence',
                                inputs=['seq_res'],
                                outputs=['res'],
                                axis=0,
                                new_axis=0)

        trip_count = numpy.array(5).astype(numpy.int64)
        seq_empty = []  # type: List[Any]
        cond = numpy.array(1).astype(numpy.bool_)

        model_def = make_model(
            opset_imports=[make_operatorsetid('', TARGET_OPSET)],
            graph=make_graph(
                name='loop_test',
                inputs=[
                    make_tensor_value_info('trip_count', TensorProto.INT64,
                                           trip_count.shape),  # pylint: disable=E1101
                    make_tensor_value_info('cond', TensorProto.BOOL,
                                           cond.shape),  # pylint: disable=E1101
                    make_sequence_value_info('seq_empty', TensorProto.FLOAT,
                                             []),  # pylint: disable=E1101
                    make_tensor_value_info('XI', TensorProto.INT64, [])
                ],  # pylint: disable=E1101
                outputs=[
                    make_tensor_value_info('res', TensorProto.FLOAT, None),  # pylint: disable=E1101
                    make_tensor_value_info('Y', TensorProto.INT64, [])
                ],  # pylint: disable=E1101
                nodes=[node1, node, node_concat]))

        del model_def.opset_import[:]  # pylint: disable=E1101
        op_set = model_def.opset_import.add()  # pylint: disable=E1101
        op_set.domain = ''
        op_set.version = 15
        model_def.ir_version = 8

        expected = numpy.array(
            [1., 1., 2., 1., 2., 3., 1., 2., 3., 4., 1., 2., 3., 4., 5.],
            dtype=numpy.float32)
        X = numpy.array([1], dtype=numpy.int64)
        for rt in ['python', 'onnxruntime1', 'python_compiled']:
            with self.subTest(rt=rt):
                oinf = OnnxInference(model_def, runtime=rt)
                inputs = {
                    'trip_count': trip_count,
                    'cond': cond,
                    'seq_empty': seq_empty,
                    'XI': X
                }
                if rt == 'python_compiled':
                    code = str(oinf)
                    self.assertIn("context={'XI': XI}", code)
                got = oinf.run(inputs)
                self.assertEqualArray(-X, got['Y'])
                self.assertEqualArray(expected, got['res'])
                if rt == 'python':
                    siz = oinf.infer_sizes(inputs)
                    self.assertIsInstance(siz, dict)
                    typ = oinf.infer_types()
                    self.assertEqual(typ["trip_count"], numpy.int64)
                    if 'cond' in typ:
                        self.assertEqual(typ["cond"], numpy.bool_)
                    for k, v in typ.items():
                        if k in {'trip_count', 'cond', 'Y', 'XI'}:
                            continue
                        self.assertIsInstance(v, SequenceType)
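Restated in plain Python (not part of the original test): on iteration i the loop body slices x[0:i + XI] (slice_end = iter_count + XI, with XI = 1 supplied as the extra outer-scope input) and appends the slice to the sequence; ConcatFromSequence then concatenates everything along axis 0:

import numpy as np

x = np.array([1, 2, 3, 4, 5], dtype=np.float32)
XI = 1
seq = []
for iter_count in range(5):        # trip_count = 5
    seq.append(x[0:iter_count + XI])
res = np.concatenate(seq, axis=0)
# res equals `expected`: [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]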
def make_tensor_sequence_value_info(name, tensor_type, shape):
    # Thin shim over make_sequence_value_info (the older helper name),
    # presumably kept so the tests also run on onnx releases that do not
    # provide helper.make_tensor_sequence_value_info.
    return make_sequence_value_info(name, tensor_type, shape, None)