Example #1
    def test_front(self):
        graph = build_graph(
            nodes,
            [('input', 'Op1', {
                'in': 0,
                'out': 0,
                'fw_tensor_debug_info': [('input', 'input'),
                                         ('Op1', 'Op1,Op2')]
            })])
        graph.stage = 'front'
        input_node = Node(graph, 'input')
        self.assertTrue(
            input_node.out_port(0).get_tensor_names() ==
            ['Op1\\,Op2', 'input'])

        op1_node = Node(graph, 'Op1')
        op1_node.add_output_port(0)
        self.assertTrue(op1_node.out_port(0).get_tensor_names() == [])

        input_node.out_port(0).add_tensor_names(
            ["A", "B", "C"], [["A:0"], ["B:0", "B:1", "B:2"], ["C:0"]])
        self.assertTrue(
            input_node.out_port(0).get_tensor_debug_info() ==
            [('input', 'input'), ('Op1', 'Op1,Op2'), ("A", "A:0"),
             ("B", "B:0,B:1,B:2"), ("C", "C:0")])
        self.assertTrue(
            input_node.out_port(0).get_tensor_names() ==
            ['A:0', 'B:0\\,B:1\\,B:2', 'C:0', 'Op1\\,Op2', 'input'])
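
The expected lists above follow a simple convention: get_tensor_names() appears to return the debug-info names sorted alphabetically, with commas escaped. A minimal stand-alone sketch of that convention (plain Python, not the MO implementation):

def escaped_sorted_names(fw_tensor_debug_info):
    # names containing ',' are escaped as '\,' and the result is sorted
    return sorted(name.replace(',', '\\,') for _, name in fw_tensor_debug_info)

assert escaped_sorted_names([('input', 'input'), ('Op1', 'Op1,Op2')]) == \
    ['Op1\\,Op2', 'input']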
Example #2
    def test_variadic_split_axis(self, axis):
        lengths = int64_array([2, 13, 10])
        graph = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': int64_array([2, 12, 25, 30])
                },
                'split_axis_data': {
                    'value': axis
                },
                'split_lengths_data': {
                    'value': lengths
                },
                'split_op': {
                    'out_ports_count': 4
                },
            })
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        VariadicSplit.infer(node)

        out_nodes_count = len(node.out_edges())
        self.assertTrue(out_nodes_count == 3)
        for out in range(out_nodes_count):
            self.assertTrue(
                np.all(
                    node.out_node(out).shape == int64_array(
                        [2, 12, lengths[out], 30])))
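
For reference, the shapes asserted here can be reproduced with a small stand-alone sketch (an assumption about VariadicSplit semantics, not the MO code): every output keeps the input shape except that the dimension at axis is replaced by the corresponding split length.

def variadic_split_shapes(input_shape, axis, split_lengths):
    # one output per split length; only the `axis` dimension changes
    shapes = []
    for length in split_lengths:
        shape = list(input_shape)
        shape[axis] = int(length)
        shapes.append(shape)
    return shapes

assert variadic_split_shapes([2, 12, 25, 30], 2, [2, 13, 10]) == \
    [[2, 12, 2, 30], [2, 12, 13, 30], [2, 12, 10, 30]]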
Example #3
    def test_case4_dest(self):
        graph = build_graph(nodes,
                            [('input', 'Op1', {
                                'in': 0,
                                'out': 0,
                                'fw_tensor_debug_info': [('input', 0, 'input')]
                            })])
        graph_ref = build_graph(
            nodes, [('NewOp', 'Op1', {
                'in': 0,
                'out': 0,
                'fw_tensor_debug_info': [('input', 0, 'input')]
            })])

        op1_node = Node(graph, 'Op1')
        new_node = Node(graph, 'NewOp')
        new_node.add_output_port(0)

        graph.stage = 'front'
        new_node.out_port(0).get_connection().set_destination(
            op1_node.in_port(0), "dest")

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'Op1',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.check_graph_attrs_front(graph, graph_ref)
Example #4
    def test_middle(self):
        graph = build_graph(nodes, [('input', 'input_data'),
                                    ('input_data', 'Op1'),
                                    ('input_data', 'Op2')])

        input_node = Node(graph, 'input')
        self.assertTrue(
            input_node.out_port(0).get_tensor_names() ==
            ['Op1\\,Op2', 'input'])

        op1_node = Node(graph, 'Op1')
        op1_node.add_output_port(0)
        self.assertTrue(op1_node.out_port(0).get_tensor_names() == [])

        op2_node = Node(graph, 'Op2')
        op2_node.add_output_port(0)
        self.assertTrue(op2_node.out_port(0).get_tensor_names() == [])

        input_node.out_port(0).add_tensor_names(
            ["A", "B", "C"], [["A:0"], ["B:0", "B:1", "B:2"], ["C:0"]])
        self.assertTrue(
            input_node.out_port(0).get_tensor_debug_info() ==
            [('input', 'input'), ('Op1', 'Op1,Op2'), ("A", "A:0"),
             ("B", "B:0,B:1,B:2"), ("C", "C:0")])
        self.assertTrue(
            input_node.out_port(0).get_tensor_names() ==
            ['A:0', 'B:0\\,B:1\\,B:2', 'C:0', 'Op1\\,Op2', 'input'])
Example #5
    def test_case3_dest(self):
        graph = build_graph(nodes, [('input', 'input_data'),
                                    ('input_data', 'Op1')])
        graph_ref = build_graph(nodes, [('input', 'input_data'),
                                        ('NewOp', 'NewOp_data'),
                                        ('NewOp_data', 'Op1')])

        new_op_data = Node(graph_ref, 'NewOp_data')
        new_op_data['fw_tensor_debug_info'] = [('input', 'input')]

        input_data = Node(graph_ref, 'input_data')
        del input_data['fw_tensor_debug_info']

        op1_node = Node(graph, 'Op1')
        new_node = Node(graph, 'NewOp')
        new_node.add_output_port(0)
        op1_node.in_port(0).get_connection().set_source(
            new_node.out_port(0), "dest")

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'Op1',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.check_graph_attrs_middle(graph, graph_ref)
Example #6
    def test_port_renumber(self):
        graph = build_graph(nodes, [('input', 'input_data'),
                                    ('input_data', 'Op1'),
                                    ('Op1', 'Op1_data', {
                                        'out': 1
                                    }), ('Op1_data', 'Op2')])
        input_node = Node(graph, 'input')
        self.assertTrue(
            input_node.out_port(0).get_tensor_names(
                port_renumber=True) == ['Op1\\,Op2', 'input'])

        op1_node = Node(graph, 'Op1')
        op1_node.add_output_port(0)

        self.assertTrue(
            op1_node.out_port(0).get_tensor_names(
                port_renumber=True) == ['Op1\\,Op2'])

        input_node.out_port(0).add_tensor_names(
            ["A:0", "B:0", "B:1", "B:2", "C:0"])
        self.assertTrue(
            input_node.out_port(0).get_tensor_debug_info() ==
            [('input', 'input'), ('Op1', 'Op1,Op2'), ("input", "A:0"),
             ("input", "B:0"), ("input", "B:1"), ("input", "B:2"),
             ("input", "C:0")])
        self.assertTrue(
            input_node.out_port(0).get_tensor_names() ==
            ['A:0', 'B:0', 'B:1', 'B:2', 'C:0', 'Op1\\,Op2', 'input'])
        input_node.out_port(0).remove_tensor_names(port_renumber=True)
        self.assertTrue(input_node.out_port(0).get_tensor_debug_info() == [])
        self.assertTrue(input_node.out_port(0).get_tensor_names() == [])
Example #7
    def test_variadic_split_non_zero(self):
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_data'), ('placeholder_data', 'variadic_split'),
                             ('variadic_split', 'variadic_split_data_1'), ('variadic_split_data_1', 'last'),
                             ('last', 'last_data'), ('last_data', 'res'),

                             ('axis_const', 'axis_const_data'),
                             ('split_dim_const', 'split_dim_const_data'),
                             ('axis_const_data', 'variadic_split', {'in': 1}),
                             ('split_dim_const_data', 'variadic_split', {'in': 2}),
                             ], nodes_with_edges_only=True)
        node = Node(graph, 'variadic_split')

        # normally the extractor should create these ports
        node['out_ports_count'] = 3
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        replacer = AddFakeOutputsToVariadicSplit()
        replacer.find_and_replace_pattern(graph)

        for n in graph.get_op_nodes():
            n['need_shape_inference'] = False
        graph_clean_up(graph)

        self.assertTrue(len(node.out_edges()) == 3)
Example #8
    def test_negative_variadic_split_axis(self, axis):
        lengths = int64_array([2, 13, 10])
        graph = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': int64_array([2, 12, 25, 30])
                },
                'split_axis_data': {
                    'value': axis
                },
                'split_lengths_data': {
                    'value': lengths
                },
                'split_op': {
                    'out_ports_count': 4
                },
            })
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        with self.assertRaises(AssertionError) as ctx:
            VariadicSplit.infer(node)
        self.assertTrue(
            ctx.exception.args[0] ==
            'VariadicSplit `axis` should be scalar or tensor with shape [1], '
            'but it`s not for node split_op')
Example #9
    def test_splitv_dynamic_input(self):
        ref_input_shape = [7, 4, 11]
        axis = 2
        num_splits = 2
        output_shape_1 = [dynamic_dimension, 4, 3]
        output_shape_2 = [7, dynamic_dimension, 3]
        output_shape_3 = [7, dynamic_dimension, 5]

        graph = build_graph(TestAttributedVariadicSplitOp.nodes, TestAttributedVariadicSplitOp.edges,
                            {
                                'split_input_data': {'shape': None},
                                'split_op': {'axis': np.array(2), 'split_lengths': np.array([3, 3, 5]),
                                             'out_ports_count': 2},
                                'split_output_0_data': {'shape': shape_array(output_shape_1),
                                                        'value': None},
                                'split_output_1_data': {'shape': shape_array(output_shape_2),
                                                        'value': None},
                                'split_output_2_data': {'shape': shape_array(output_shape_3),
                                                        'value': None},
                            }
                            )
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        AttributedVariadicSplit.reverse_infer(node)

        actual_input_shape = node.in_port(0).data.get_shape()
        self.assertTrue(strict_compare_tensors(ref_input_shape, actual_input_shape))
Example #10
def re_number_output_port(loop_node: Node, old_port_id: int,
                          new_port_id: int):
    loop_node.add_output_port(new_port_id, skip_if_exist=True)
    loop_node.out_port(old_port_id).get_connection().set_source(
        loop_node.out_port(new_port_id))
    Loop.update_port_map_value(loop_node.output_port_map,
                               'external_port_id', old_port_id,
                               new_port_id)
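
re_number_output_port moves the connection from old_port_id to a freshly added new_port_id and updates the Loop's port map. A hypothetical stand-in for Loop.update_port_map_value with the semantics the call suggests (name reused for illustration; the real helper may differ):

def update_port_map_value(port_map, attr, old_value, new_value):
    # rewrite every record whose `attr` equals old_value to new_value
    for record in port_map:
        if record.get(attr) == old_value:
            record[attr] = new_value

port_map = [{'external_port_id': 0, 'internal_layer_id': 8, 'axis': None}]
update_port_map_value(port_map, 'external_port_id', 0, 1)
assert port_map[0]['external_port_id'] == 1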
Example #11
    def test_port_renumber(self):
        graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'),
                                    ('Op1', 'Op1_data', {'out': 1}), ('Op1_data', 'Op2')])
        input_node = Node(graph, 'input')
        self.assertTrue(input_node.out_port(0).get_tensor_names(port_renumber=True) == ['Op1\\,Op2', 'input'])

        op1_node = Node(graph, 'Op1')
        op1_node.add_output_port(0)

        self.assertTrue(op1_node.out_port(0).get_tensor_names(port_renumber=True) == ['Op1\\,Op2'])
Example #12
    def find_and_replace_pattern(self, graph: Graph):
        graph.stage = 'front'
        for node_id in graph.nodes(data=False):
            node = Node(graph, node_id)
            inputs = node.get_sorted_inputs()
            outputs = node.get_sorted_outputs()

            in_ports_count = node.in_ports_count if node.has_valid(
                'in_ports_count') else len(inputs)
            out_ports_count = node.out_ports_count if node.has_valid(
                'out_ports_count') else len(outputs)

            if len(outputs) > out_ports_count > 1:
                raise Error("Node {} has more children than it should: "
                            "should be {} but there are {}".format(
                                node_id, out_ports_count, len(outputs)))

            node['_in_ports'] = {}
            node['_out_ports'] = {}
            if in_ports_count is not None:
                for idx in range(in_ports_count):
                    node.add_input_port(idx=idx)

            if out_ports_count is not None:
                for idx in range(out_ports_count):
                    node.add_output_port(idx=idx)
            idx = 0
            for in_node_id, edge_attrs in inputs:
                graph.remove_edge(in_node_id, node_id)
                if len(Node(graph, in_node_id).out_ports()) == 0:
                    Node(graph, in_node_id).add_output_port(0)
                in_node = Node(graph, in_node_id)
                in_node.out_port(edge_attrs['out']).connect(node.in_port(idx))
                # need to keep this attribute in edge for correct .mapping file generation and
                # for generation of "names" field in IR
                in_node.out_edge(
                    edge_attrs['out']
                )['fw_tensor_debug_info'] = edge_attrs['fw_tensor_debug_info']
                if idx < in_ports_count - 1:
                    idx = idx + 1

            idx = 0
            for out_node_id, edge_attrs in outputs:
                graph.remove_edge(node_id, out_node_id)
                if len(Node(graph, out_node_id).in_ports()) == 0:
                    Node(graph, out_node_id).add_input_port(0)
                node.out_port(idx).connect(
                    Node(graph, out_node_id).in_port(edge_attrs['in']))
                # need to keep this attribute in edge for correct .mapping file generation and
                # for generation of "names" field in IR
                node.out_edge(idx)['fw_tensor_debug_info'] = edge_attrs[
                    'fw_tensor_debug_info']
                if idx < out_ports_count - 1:
                    idx = idx + 1
Example #13
    def test_front(self):
        graph = build_graph(nodes,
                            [('input', 'Op1', {'in': 0, 'out': 0, 'fw_tensor_debug_info': [('input', 'input'),
                                                                                           ('Op1', 'Op1,Op2')]})])
        graph.stage = 'front'
        input_node = Node(graph, 'input')
        self.assertTrue(input_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'input'])

        op1_node = Node(graph, 'Op1')
        op1_node.add_output_port(0)
        self.assertTrue(op1_node.out_port(0).get_tensor_names() == [])
Example #14
def normalize_outputs(node: Node):
    if node.has_valid('out_ports_count') and len(
            node.out_edges()) < node.out_ports_count:
        from openvino.tools.mo.ops.result import Result  # import here to avoid a circular import error
        for p in range(node.out_ports_count):
            if p not in node.out_ports():
                node.add_output_port(p)
            if node.out_port(p).disconnected():
                res_node = Result(
                    node.graph, {
                        'name': node.name + '/Fake_output_{}/'.format(p),
                        'keep_output_port': True
                    }).create_node()
                node.out_port(p).connect(res_node.in_port(0))
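
A stand-alone sketch of the invariant normalize_outputs enforces (the dict below is a stand-in for the MO graph API): after normalization, every port index below out_ports_count exists, and each previously disconnected port has a fake Result consumer.

out_ports_count = 3
consumers = {0: ['Result_0']}  # port -> consumers; ports 1 and 2 do not exist yet
for p in range(out_ports_count):
    consumers.setdefault(p, [])                       # node.add_output_port(p)
    if not consumers[p]:                              # node.out_port(p).disconnected()
        consumers[p] = ['Fake_output_{}/'.format(p)]  # connect a fake Result
assert all(consumers[p] for p in range(out_ports_count))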
Example #15
    def test_middle(self):
        graph = build_graph(nodes, [('input', 'input_data'), ('input_data', 'Op1'),
                                    ('input_data', 'Op2')])

        input_node = Node(graph, 'input')
        self.assertTrue(input_node.out_port(0).get_tensor_names() == ['Op1\\,Op2', 'input'])

        op1_node = Node(graph, 'Op1')
        op1_node.add_output_port(0)
        self.assertTrue(op1_node.out_port(0).get_tensor_names() == [])

        op2_node = Node(graph, 'Op2')
        op2_node.add_output_port(0)
        self.assertTrue(op2_node.out_port(0).get_tensor_names() == [])
Example #16
    def test_splitv_dynamic_input(self):
        graph = build_graph(self.nodes, self.edges,
                            {
                                'split_input_data': {'shape': shape_array([2, 12, dynamic_dimension_value, 30])},
                                'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 10]),
                                             'out_ports_count': 4},
                            }
                            )
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        AttributedVariadicSplit.infer(node)

        self.assertTrue(len(node.out_edges()) == 3)
        self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10])))
Example #17
    def test_case4_source(self):
        graph = build_graph(nodes, [('input', 'input_data'),
                                    ('input_data', 'Op1')])
        graph_ref = build_graph(nodes, [('input', 'input_data'),
                                        ('NewOp', 'NewOp_data'),
                                        ('NewOp_data', 'Op1')])

        op1_node = Node(graph, 'Op1')
        new_node = Node(graph, 'NewOp')
        new_node.add_output_port(0)
        new_node.out_port(0).get_connection().set_destination(
            op1_node.in_port(0), "source")

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'Op1',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.check_graph_attrs_middle(graph, graph_ref)
Example #18
    def test_splitv_zero_not_last(self):
        graph = build_graph(self.nodes, self.edges,
                            {
                                'split_input_data': {'shape': int64_array([2, 12, 25, 30])},
                                'split_op': {'axis': np.array(2), 'split_lengths': np.array([2, 13, 0, 10]),
                                             'out_ports_count': 4},
                            }
                            )
        node = Node(graph, 'split_op')

        # normally the extractor should create these ports
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)
        node.out_port(2).get_connection().set_source(node.out_port(3))

        AttributedVariadicSplit.infer(node)

        self.assertTrue(node.out_port(3).disconnected())
        self.assertTrue(np.all(node.split_lengths == np.array([2, 13, 10])))
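
The assertions reflect a simple rule: a zero-length entry produces an empty output, so it is dropped from split_lengths and the matching output port is disconnected. The dropped-lengths half of that rule in isolation:

import numpy as np

split_lengths = np.array([2, 13, 0, 10])
kept = split_lengths[split_lengths != 0]  # zero-length splits are removed
assert list(kept) == [2, 13, 10]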
Example #19
    def test_variadic_split_value_inference_with_uint32(self):
        axis = int64_array(2)
        # The sum of a Python int and a NumPy np.uint64 gives float64, but
        # np.split accepts only integers and raises an error for floats, so its
        # arguments have to be explicitly cast to integer; this test was added
        # for that case.
        lengths = mo_array([2, 13, 10], dtype=np.uint64)
        input_shape = mo_array([2, 12, 25, 30])
        input_value = np.zeros(input_shape)

        graph = build_graph(
            self.nodes, self.edges, {
                'split_input_data': {
                    'shape': input_shape,
                    'value': input_value
                },
                'split_axis_data': {
                    'value': axis
                },
                'split_lengths_data': {
                    'value': lengths
                },
                'split_op': {
                    'out_ports_count': 4
                },
            })
        node = Node(graph, 'split_op')
        for p in range(len(node.out_edges()), node.out_ports_count):
            node.add_output_port(p)

        VariadicSplit.infer(node)

        out_nodes_count = len(node.out_edges())
        self.assertTrue(out_nodes_count == 3)
        for out in range(out_nodes_count):
            self.assertTrue(
                np.all(
                    node.out_node(out).shape == int64_array(
                        [2, 12, lengths[out], 30])))
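
The promotion problem described in the comment can be reproduced stand-alone: under legacy (pre-2.0) NumPy promotion rules, a Python int plus np.uint64 yields float64, and np.split rejects float split points, hence the explicit casts:

import numpy as np

lengths = np.array([2, 13, 10], dtype=np.uint64)
# cast each length to a Python int before computing the split points
split_points = [int(lengths[0]), int(lengths[0]) + int(lengths[1])]  # [2, 15]
sections = np.split(np.zeros(25), split_points)
assert [len(s) for s in sections] == [2, 13, 10]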
Example #20
def load_kalid_nnet2_model(graph, file_descr, nnet_name):
    input_name = 'Input'
    graph.add_node(input_name,
                   name=input_name,
                   kind='op',
                   op='Parameter',
                   parameters=None,
                   shape=None)

    prev_layer_id = input_name

    all_components = load_components(file_descr, graph)

    used_layers = set()
    for layer_id in all_components:
        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            parameters = Node(graph, layer_id).parameters
            input_dim = read_token_value(parameters, b'<InputDim>')
            prev_node['shape'] = np.array([1, input_dim], dtype=np.int64)
        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(
            prev_node, Node(graph, layer_id), 0, 0,
            create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        used_layers.add(prev_layer_id)
        log.debug('{} and {} were connected'.format(prev_layer_id, layer_id))
        prev_layer_id = layer_id

    # Tensor name information corresponding to a node is stored on its outgoing edges.
    # As output nodes have no outgoing edges, fake outputs are required. In the code below,
    # an Identity node is added for each output, and the tensor name for the output is kept
    # on the (output, fake output) edge. After the Result-adding transformation, the fake
    # outputs are deleted from the graph.
    output_layers = graph.nodes - used_layers
    add_outputs_identity(
        graph, output_layers, lambda g, output, fake_output: g.create_edge(
            Node(g, output), Node(g, fake_output), 0, 0,
            create_edge_attrs(output, fake_output, output)))
Example #21
    def replace_op(self, graph: Graph, node: Node):
        if node.use_peephole:
            raise Error(
                "BlockLSTM operation is not supported with `use_peephole`==True. Node: {}"
                "".format(node.soft_get('name')))

        if node.cell_clip != -1:
            raise Error(
                "Clipping is not supported for BlockLSTM operation. `cell_clip`={!s} for node: {}"
                "".format(node.cell_clip, node.soft_get('name')))

        log.debug(
            "Start BlockLSTM->LSTMSequence translation for node: {} with parameters:\n"
            "`cell_clip`={!s}, `use_peephole`=={!s}, `forget_bias`={!s}\n"
            "inputs: {},\noutputs:{}".format(
                node.soft_get('name'), node.cell_clip, node.use_peephole,
                node.forget_bias,
                {p: i.id for p, i in node.in_nodes().items()},
                {p: o.id for p, o in node.out_nodes().items()}))

        log.debug(
            "Cutting all inputs for peephole connection (5, 6, 7 input ports) off, as `use_peephole`=False"
        )

        for p, input_data in node.in_nodes().items():
            if p in [5, 6, 7]:
                key = self.find_key_by_input_port(node.in_node(p), node, p)
                assert key is not None
                graph.remove_edge(node.in_node(p).id, node.id, key=key)

        log.debug("Cutting seq_len_max input off")
        graph.remove_edge(node.in_node(0).id, node.id)
        """
        Reconnecting input edges of LSTMSequence:
        TF input edges:             Description:                 MO input edges:
              1                          input                        0
              4                         weights                       1
              8                         biases                        2
              3               h_prev: initial output of cell          3
              2               cs_prev: initial cell state             4
        """
        inputs = node.in_edges()
        assert 1 in inputs, "Sequence input to the BlockLSTM is required (1 port). Node {}".format(
            node.id)
        assert 2 in inputs, "Value of the initial cell state is required (2 port). Node {}".format(
            node.id)
        assert 3 in inputs, "Initial output of cell is required input to BlockLSTM (3 port). Node {}".format(
            node.id)
        assert 4 in inputs, "The weight matrix is required input to BlockLSTM (4 port) . Node {}".format(
            node.id)
        assert 8 in inputs, "The bias vector is required input to BlockLSTM (8 port). Node {}".format(
            node.id)

        inputs[3]['in'] = 3
        inputs[1]['in'] = 0
        inputs[4]['in'] = 1
        inputs[2]['in'] = 4
        inputs[8]['in'] = 2

        log.debug(
            "Checking for unsupported outputs usage (output ports: 0, 2, 3, 4, 5)"
        )
        for port, input_data in node.out_nodes().items():
            if port in [0, 2, 3, 4, 5]:
                raise Error(
                    "Output port {} of BlockLSTM node {} is not supported".
                    format(port, node.id))
        """
        Reconnecting output edges of LSTMSequence:
        TF output edges:             Description:                 MO output edges:
              6                     output h vector                     0
              1                   cell state before the tanh            1
        """

        outputs = node.out_edges()
        if 6 in outputs:
            outputs[6]['out'] = 0
            node.add_output_port(0, skip_if_exist=True)

        # do not replace any output edge
        return []
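
The two docstring tables condense to small mappings; restated as data for quick reference (illustration only, not code from the transformation):

# TF BlockLSTM port -> MO LSTMSequence port, per the tables above
tf_to_mo_inputs = {1: 0, 4: 1, 8: 2, 3: 3, 2: 4}  # input, weights, biases, h_prev, cs_prev
tf_to_mo_outputs = {6: 0, 1: 1}                   # output h, cell state before tanh
assert tf_to_mo_inputs[1] == 0 and tf_to_mo_outputs[6] == 0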
Example #22
def merge_nodes(graph: Graph,
                nodes_to_merge_names: list,
                inputs_desc: list = None,
                outputs_desc: list = None):
    """
    Merges the nodes specified in the list 'nodes_to_merge_names' into one mega-node, creating new edges between the
    mega-node and the input/output nodes of the mega-node. The added edges contain the names of the input/output nodes,
    which are used for the generation of placeholders and are saved to the IR xml so the IE plug-in knows how to map
    input/output data for the layer. The function also adds the protobufs of the sub-graph nodes and of the 'Const' ops
    consumed by nodes in the sub-graph to the node's 'pbs' attribute.
    :param graph: the graph object to operate on.
    :param nodes_to_merge_names: list of nodes names that should be merged into a single node.
    :param inputs_desc: optional list describing input nodes order.
    :param outputs_desc: optional list describing output nodes order.
    """
    if not is_connected_component(graph, nodes_to_merge_names):
        log.warning(
            "The following nodes do not form connected sub-graph: {}".format(
                nodes_to_merge_names))
        # graph.dump_graph_for_graphviz(nodes_to_dump=nodes_to_merge_names)

    new_node_name = graph.unique_id("TFSubgraphCall_")
    log.info("Create new node with name '{}' for nodes '{}'".format(
        new_node_name, ', '.join(nodes_to_merge_names)))
    graph.add_node(new_node_name)
    new_node_attrs = graph.node[new_node_name]

    new_node_attrs['name'] = new_node_name
    set_tf_custom_call_node_attrs(new_node_attrs)
    new_node = Node(graph, new_node_name)

    added_input_tensors_names = set()  # tensors that were added as input to the sub-graph
    added_new_node_output_tensors = dict()  # key - tensor name, value - out port

    for node_name in nodes_to_merge_names:
        node = Node(graph, node_name)
        add_node_pb_if_not_yet_added(node, new_node)
        # TODO: any improvements?
        for in_node_name, edge_attrs in Node(graph, node_name).get_inputs():
            in_node = Node(graph, in_node_name)

            # internal edges between nodes of the sub-graph
            if in_node_name in nodes_to_merge_names:
                add_node_pb_if_not_yet_added(in_node, new_node)
                continue

            # edge outside of sub-graph into sub-graph
            if in_node_name not in nodes_to_merge_names:
                # we cannot use the 'in_node_name' as a protobuf operation name here
                # because the 'in_node_name' could be a sub-graph matched before.
                input_tensor_name = node.pb.input[edge_attrs['in']]
                if input_tensor_name not in added_input_tensors_names:
                    if not new_node.has_port('in', edge_attrs['in']):
                        new_node.add_input_port(edge_attrs['in'])
                    graph.add_edge(
                        in_node_name, new_node_name,
                        **merge_edge_props(
                            {
                                'in': find_input_port(new_node, inputs_desc,
                                                      node_name, edge_attrs['in']),
                                'out': edge_attrs['out'],
                                'internal_input_node_name': input_tensor_name,
                                'original_dst_node_name': node_name,
                                'original_dst_port': edge_attrs['in'],
                                'in_attrs': ['in', 'internal_input_node_name',
                                             'original_dst_node_name',
                                             'original_dst_port', 'placeholder_name'],
                                'out_attrs': ['out']
                            }, edge_attrs))
                    log.debug(
                        "Creating edge from outside of sub-graph to inside sub-graph: {} -> {}"
                        .format(in_node_name, new_node_name))
                    added_input_tensors_names.add(input_tensor_name)

        # edge from inside sub-graph to outside sub-graph
        for out_node_name, edge_attrs in Node(graph, node_name).get_outputs():
            if out_node_name not in nodes_to_merge_names:
                log.debug(
                    "Creating edge from inside of sub-graph to outside sub-graph: {} -> {}"
                    .format(new_node_name, out_node_name))
                out_name = internal_output_name_for_node(
                    node_name, edge_attrs['out'])
                if out_name not in added_new_node_output_tensors.keys():
                    added_new_node_output_tensors[out_name] = find_output_port(
                        new_node, outputs_desc, node_name, edge_attrs['out'])
                if not new_node.has_port(
                        'out', added_new_node_output_tensors[out_name]):
                    new_node.add_output_port(
                        added_new_node_output_tensors[out_name])
                graph.add_edge(
                    new_node_name, out_node_name,
                    **merge_edge_props(
                        {
                            'in': edge_attrs['in'],
                            'out': added_new_node_output_tensors[out_name],
                            'internal_output_node_name': out_name,
                            'in_attrs': ['in', 'internal_input_node_name'],
                            'out_attrs': ['out', 'internal_output_node_name']
                        }, edge_attrs))
        new_node['output_tensors_names'] = list(
            {v: k for k, v in added_new_node_output_tensors.items()}.values())

    # add nodes using the same order as in initial GraphDef so we can dump them to IR in "correct" order
    new_node['nodes_order'] = [
        node for node in graph.graph['initial_nodes_order']
        if node in new_node['pbs'].keys()
    ]

    for n in nodes_to_merge_names:
        if graph.has_node(n):  # check if not deleted by another (similar) pattern
            graph.remove_node(n)
    return Node(graph, new_node_name)
Example #23
def load_kalid_nnet1_model(graph, file_descr, name):
    prev_layer_id = 'Parameter'
    graph.add_node(prev_layer_id,
                   name=prev_layer_id,
                   kind='op',
                   op='Parameter',
                   parameters=None)

    # find the output layer; there can be only one due to the chain structure of the nnet1 model
    output_layer = None
    while True:
        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        layer_o = read_binary_integer32_token(file_descr)
        layer_i = read_binary_integer32_token(file_descr)

        if component_type == 'parallelcomponent':
            prev_layer_id = load_parallel_component(file_descr, graph,
                                                    prev_layer_id)
            find_end_of_component(file_descr, component_type)
            continue

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        end_index -= len(end_tag)
        layer_id = graph.unique_id(prefix=component_type)
        graph.add_node(layer_id,
                       parameters=get_parameters(file_descr, start_index,
                                                 end_index),
                       op=component_type,
                       kind='op',
                       layer_i=layer_i,
                       layer_o=layer_o)
        if hasattr(graph, 'op_names_statistic'):
            graph.op_names_statistic[component_type] += 1

        prev_node = Node(graph, prev_layer_id)
        if prev_node.op == 'Parameter':
            prev_node['shape'] = np.array([1, layer_i], dtype=np.int64)

        prev_node.add_output_port(0)
        Node(graph, layer_id).add_input_port(0)
        graph.create_edge(
            prev_node, Node(graph, layer_id), 0, 0,
            create_edge_attrs(prev_layer_id, layer_id, prev_layer_id))
        prev_layer_id = layer_id
        output_layer = layer_id
        log.debug('{} (type is {}) was loaded'.format(prev_layer_id,
                                                      component_type))

    # Tensor name information corresponding to a node is stored on its outgoing edges.
    # As output nodes have no outgoing edges, fake outputs are required. In the code below,
    # an Identity node is added for each output, and the tensor name for the output is kept
    # on the (output, fake output) edge. After the Result-adding transformation, the fake
    # outputs are deleted from the graph.
    assert output_layer is not None, "Output layer is not found in graph"
    add_outputs_identity(
        graph, [output_layer], lambda g, output, fake_output: g.create_edge(
            Node(g, output), Node(g, fake_output), 0, 0,
            create_edge_attrs(output, fake_output, output)))
Example #24
    def test_out_normalization(self):
        graph = build_graph(nodes_attrs={
            'input': {
                'kind': 'op',
                'op': 'Parameter',
                'name': 'node'
            },
            'input_data': {
                'kind': 'data'
            },
            'pool': {
                'kind': 'op',
                'name': 'node',
                'type': 'Pooling',
                'pool_method': 'max'
            },
            'pool_data': {
                'kind': 'data'
            },
            'result': {
                'kind': 'op',
                'op': 'Result',
                'name': 'node'
            }
        },
                            edges=[('input', 'input_data'),
                                   ('input_data', 'pool'),
                                   ('pool', 'pool_data'),
                                   ('pool_data', 'result')])

        graph_ref = build_graph(nodes_attrs={
            'input': {
                'kind': 'op',
                'op': 'Parameter',
                'name': 'node'
            },
            'input_data': {
                'kind': 'data'
            },
            'pool': {
                'kind': 'op',
                'name': 'node',
                'type': 'MaxPool'
            },
            'pool_data': {
                'kind': 'data'
            },
            'pool_data_added': {
                'kind': 'data'
            },
            'result': {
                'kind': 'op',
                'op': 'Result',
                'name': 'node'
            },
            'result_added': {
                'kind': 'op',
                'op': 'Result',
                'name': 'node'
            }
        },
                                edges=[('input', 'input_data'),
                                       ('input_data', 'pool'),
                                       ('pool', 'pool_data'),
                                       ('pool_data', 'result'),
                                       ('pool', 'pool_data_added'),
                                       ('pool_data_added', 'result_added')])

        pool_op = Node(graph, 'pool')
        # add a disconnected output port to check normalization
        pool_op.add_output_port(1)

        MaxPool().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'result',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
Example #25
    def test_add_output_1(self):
        sub_graph_2 = build_graph(nodes_attrs=sub_graph_2_nodes,
                                  edges=[
                                      *connect('cond_2_int', 'cond_2_int_out'),
                                      *connect('in_2_int', 'OUT_2'),
                                      *connect('ones', 'OUT_2'),
                                      *connect('OUT_2', 'OUT_2_out'),
                                      *connect('in_2_int', 'in_2_int_out')
                                  ],
                                  nodes_with_edges_only=True)

        sub_graph_1 = build_graph(nodes_attrs=sub_graph_1_nodes,
                                  edges=[
                                      *connect('M_2', '0:Loop_2'),
                                      *connect('cond_2', '1:Loop_2'),
                                      *connect('IN_2', '2:Loop_2'),
                                      *connect('Loop_2:0', 'Loop_2_out'),
                                      *connect('in_1_int', 'in_1_int_out'),
                                      *connect('cond_1_int', 'cond_1_int_out')
                                  ],
                                  nodes_with_edges_only=True)
        loop_node_1 = Node(sub_graph_1, 'Loop_2')
        loop_node_1.body = sub_graph_2

        main_graph = build_graph(nodes_attrs=main_graph_nodes,
                                 edges=[
                                     *connect('M', '0:Loop'),
                                     *connect('cond', '1:Loop'),
                                     *connect('IN_2', '2:Loop'),
                                     *connect('IN_1', "3:Loop"),
                                     *connect('Loop:0', 'OUT_1')
                                 ],
                                 nodes_with_edges_only=True)
        loop_node = Node(main_graph, 'Loop')
        loop_node.body = sub_graph_1
        main_graph.graph['additional_outputs'] = ['Loop', 'Loop_2']
        loop_node_1['out_ports_count'] = 2
        loop_node_1.add_output_port(1)
        loop_node_1['output_port_map'].append({
            'external_port_id': 1,
            'internal_layer_id': 8,
            'axis': None
        })

        loop_node_output_port_map_len = len(loop_node.output_port_map)
        loop_node_out_ports_len = len(loop_node.out_ports())
        loop_2_out_ports_len = len(loop_node_1.out_ports())
        max_layer_id = 5

        results = AddOutputRecursive().find_and_replace_pattern(main_graph)

        self.assertEqual(len(results), 2)
        loop_node = Node(main_graph, 'Loop')
        self.assertEqual(len(loop_node.output_port_map),
                         loop_node_output_port_map_len + 2)
        self.assertEqual(len(loop_node.out_ports()),
                         loop_node_out_ports_len + 2)
        self.assertEqual(
            loop_node.out_port(1).get_destination().node.op, 'Result')
        self.assertTrue(
            np.all(
                loop_node.out_port(1).data.get_shape() == int64_array(
                    [5, 10, 4, 64, 54])))
        last_node = Node(sub_graph_1, 'Loop_2')
        self.assertEqual(len(last_node.out_ports()), loop_2_out_ports_len)
        unsq_node = last_node.out_port(0).get_destinations()[1].node
        self.assertEqual(unsq_node.op, 'Unsqueeze')
        self.assertEqual(
            unsq_node.out_port(0).get_destination().node.op, 'Result')
        self.assertEqual(
            unsq_node.out_port(0).get_destination().node.internal_layer_id,
            max_layer_id + 3)
        self.assertTrue(
            np.all(
                unsq_node.out_port(0).data.get_shape() == int64_array(
                    [1, 10, 4, 64, 54])))
Example #26
def muladd_to_scaleshift_action(graph: Graph, match: dict):
    mul = match['mul']
    add = match['add']
    output = match['output']

    # The pass works correctly only when the node has exactly one output
    if len(mul.out_port(0).get_destinations()) > 1:
        return

    if mul.soft_get('can_be_scaleshift') is False or add.soft_get('can_be_scaleshift') is False:
        return

    mul_weights_id = get_value_id(mul)
    mul_input_id = get_tensor_id(mul)
    add_weights_id = get_value_id(add)

    if mul_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(mul.name))
        return
    if mul_input_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(mul.name))
        return
    if add_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(add.name))
        return

    input = mul.in_node(mul_input_id)
    weights = mul.in_node(mul_weights_id)
    bias = add.in_node(add_weights_id)

    # Transform values
    weights.value = np.squeeze(weights.value)
    weights.shape = int64_array(weights.value.shape)

    bias.value = np.squeeze(bias.value)
    bias.shape = int64_array(bias.value.shape)

    # Broadcast weights if they are scalar
    if weights.value.ndim == 0 and bias.value.ndim == 1:
        weights.value = np.full(bias.shape, weights.value.item(), dtype=weights.value.dtype)
        weights.shape = int64_array(weights.value.shape)

    if not np.array_equal(bias.shape, weights.shape):
        log.warning('Mul->Add to ScaleShift conversion stopped {} != {}'.format(weights.shape, bias.shape))
        return

    if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
        log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {} because of different weights "
                  "and biases".format(mul.name, add.name))
        return

    if bias.value.size == 1 and weights.value.size == 1:
        log.debug("Skipping Mul->Add to ScaleShift conversion for nodes {}, {}. Will be converted to Power"
                  "".format(mul.name, add.name))
        return

    op_name = "ScaleShift"

    log.debug("Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}"
              "".format(op_name, mul.id, add.id, bias.shape, weights.shape))

    graph.remove_edge(input.node, mul.id)
    graph.remove_edge(weights.node, mul.id)
    graph.remove_edge(bias.node, add.id)
    graph.remove_edge(add.node, output.id)

    op_node = graph.unique_id(mul.name + '/Fused{}_'.format(op_name))

    graph.add_node(op_node, **add_attrs_props(dict(kind='op', type=op_name, name=op_node, op=op_name,
                                                   data_type=input.data_type)))
    scsh = Node(graph, op_node)
    scsh.add_input_port(0)
    scsh.add_input_port(1)
    scsh.add_input_port(2)
    scsh.add_output_port(0)

    update_ie_fields(graph.node[op_node])

    graph.add_edges_from([
        (input.node, op_node, {'in': 0}),
        (weights.node, op_node, {'in': 1, 'bin': 'weights'}),
        (bias.node, op_node, {'in': 2, 'bin': 'biases'}),
        (op_node, output.node, {'out': 0})
    ])

    return
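
The squeeze-and-broadcast step above can be checked in isolation with plain NumPy (this mirrors a few lines of the pass, not the whole transformation):

import numpy as np

weights = np.squeeze(np.array([[2.0]]))          # scalar after squeeze (ndim 0)
bias = np.squeeze(np.array([[1.0, 2.0, 3.0]]))   # 1-D after squeeze
if weights.ndim == 0 and bias.ndim == 1:
    # broadcast the scalar weight to the bias shape, as the pass does
    weights = np.full(bias.shape, weights.item(), dtype=weights.dtype)
assert weights.shape == bias.shape == (3,)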
Example #27
def rnn_infer(node: Node, out_ports=None):
    """
    General infer function for RNN, GRU, LSTM layers.
    Assumes that the 0-port input of the node is the input data for the
    recurrent layer and that the node has the attribute hidden_size.
    """
    if out_ports is None:
        out_ports = []

    # 1. Necessary checks (from ONNX specification)
    assert node.batch_dim <= 1
    assert node.sequence_dim <= 1
    assert node.batch_dim != node.sequence_dim
    assert node.direction in ['forward', 'reverse', 'bidirectional']

    if node.blobs_wrb:
        mark_input_bins(node, ['W', 'R', 'B'])
    else:
        mark_input_bins(node)

    # 2. Output shape calculations
    input_shape = node.in_node(0).shape
    assert len(input_shape) == 3

    # Reshape input nodes
    for port in [2, 3]:
        if port in node.in_nodes() and len(node.in_node(port).in_nodes()) > 0 and \
                'zero_shapes' in node.in_node(port).in_node():
            for i in node.in_node(port).in_node().zero_shapes:
                if node.in_node(port).shape[i] != input_shape[i]:
                    node.in_node(port).value = np.repeat(
                        node.in_node(port).value, input_shape[i], axis=i)
                    node.in_node(port).shape[i] = input_shape[i]

    out_shape = [
        input_shape[node.sequence_dim], input_shape[node.batch_dim],
        node.hidden_size
    ]

    if node.batch_dim == 0:
        out_shape = [
            input_shape[node.batch_dim], input_shape[node.sequence_dim],
            node.hidden_size
        ]

    num_directions = 2 if node.direction in ['bidirectional'] else 1
    if node.has_num_directions:
        if node.format == 'mxnet' and node.normalized is False:
            # In MXNet RNN layer return output with shape [seq_len, batch_size, hidden_size * num_directions]
            out_shape[-1] *= num_directions
        else:
            # ONNX-like, insert extra dimension to output shape for num_directions
            out_shape = shape_insert(out_shape, 1, np.int64(num_directions))

    # Output 0 is required: create it if it does not exist
    if 0 not in node.out_nodes():
        data_node = Op._create_data_node(node.graph,
                                         name=node.node +
                                         '/ExtraOutput/{}'.format(0),
                                         attrs={'executable': True})
        if 0 not in node.out_ports():
            node.add_output_port(0)
        node.graph.add_edge(node.id, data_node.id, key=0, out=0)
        add_opoutput(node.graph, data_node.id, 0, False)
    node.out_port(0).data.set_shape(out_shape)

    # 3. Extra outputs for hidden/cell states shape calculations (optional)
    state_size = [input_shape[node.batch_dim], node.hidden_size]
    if node.has_num_directions:
        state_size = shape_insert(state_size, 0, num_directions)

    if node.multilayers:
        # For multilayer case state sizes from every layer will be concatenated by last axis
        num_layers = node.num_layers
        state_size[-1] *= num_layers

    for i in out_ports:
        # If the node has no consumers for the hidden/cell state, create them
        if i not in node.out_nodes():
            data_node = Op._create_data_node(node.graph,
                                             name=node.node + '/ExtraOutput/' +
                                             str(i),
                                             attrs={'executable': True})
            if i not in node.out_ports():
                node.add_output_port(i)
            node.graph.add_edge(node.id, data_node.id, key=0, out=i)
            add_opoutput(node.graph, data_node.id, 0, False)
        else:
            data_node = node.out_node(i)
        data_node.shape = shape_array(state_size)
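
The output-shape arithmetic of rnn_infer fits in a few lines; a stand-alone sketch for the case with a num_directions axis (a simplification for illustration, not a drop-in replacement):

def rnn_out_shape(input_shape, batch_dim, sequence_dim, hidden_size, num_directions):
    seq_len = input_shape[sequence_dim]
    batch = input_shape[batch_dim]
    shape = [batch, seq_len, hidden_size] if batch_dim == 0 else [seq_len, batch, hidden_size]
    # insert the num_directions axis at position 1, as shape_insert does above
    return shape[:1] + [num_directions] + shape[1:]

# seq_len=10, batch=4, hidden_size=32, bidirectional
assert rnn_out_shape([10, 4, 16], batch_dim=1, sequence_dim=0,
                     hidden_size=32, num_directions=2) == [10, 2, 4, 32]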
Example #28
def build_graph_with_attrs(nodes_with_attrs: list,
                           edges_with_attrs: list,
                           new_nodes_with_attrs: list = [],
                           new_edges_with_attrs: list = [],
                           update_edge_attrs: dict = None,
                           update_nodes_attributes: list = None,
                           nodes_with_edges_only: bool = False,
                           add_nodes_from_edges: bool = False):
    """
    Build the Graph with specific nodes and edges. Updating edge and node parameters is also supported.
    :param nodes_with_attrs: list of tuples ('node_name', {node_attrs})
    :param edges_with_attrs: list of tuples like (start node, end node, (optional) {attrs of the edge}).
    :param new_nodes_with_attrs: same as nodes_with_attrs, but for nodes that must be new to the graph
    :param new_edges_with_attrs: same as edges_with_attrs, but for edges that must be new to the graph
    :param update_edge_attrs: optional dictionary like {('from_node', 'to_node', key): {edge_attrs}}.
    :param update_nodes_attributes: optional list of tuples which specifies node names and the attributes to be
    updated. The first element is the name of the node to update and the second element is a dictionary with the
    attribute names and their values.
    :param nodes_with_edges_only: add only nodes which have at least one incoming or outgoing edge.
    :param add_nodes_from_edges: whether nodes that are not listed in all_nodes but appear in all_edges are allowed.
    :return: generated graph.
    """
    if not_all_new([node[0] for node in nodes_with_attrs],
                   [node[0] for node in new_nodes_with_attrs]):
        raise Error(
            'Some nodes from new_nodes_with_attrs are already in nodes.'
            ' Please, add to new_nodes_with_attrs only NEW nodes.')

    if not_all_new([(edge[0], edge[1]) for edge in edges_with_attrs],
                   [(edge[0], edge[1]) for edge in new_edges_with_attrs]):
        raise Error(
            'Some edges from new_edges_with_attrs are already in edges.'
            ' Please, add to new_edges_with_attrs only NEW edges.')

    # Check that all nodes from list of edges are in nodes
    all_nodes = nodes_with_attrs + new_nodes_with_attrs
    all_edges = edges_with_attrs + new_edges_with_attrs
    all_nodes_names = [node[0] for node in all_nodes]
    if not add_nodes_from_edges and not all_edges_in_nodes(
            nodes=all_nodes_names, edges=all_edges):
        raise Error(
            "Some nodes from the list of edges are not in nodes. Please add all necessary nodes."
        )

    graph = Graph()

    # Create dict for nodes with attrs
    nodes_attrs = {}
    for node_name, attrs in all_nodes:
        nodes_attrs[node_name] = attrs
        if 'name' not in attrs:
            attrs['name'] = node_name

    if nodes_with_edges_only:
        # filter nodes to keep only ones with edges connected
        filtered_nodes = {}
        for edge in all_edges:
            node_1, node_2 = edge[0], edge[1]
            filtered_nodes[node_1] = nodes_attrs[node_1]
            filtered_nodes[node_2] = nodes_attrs[node_2]
        nodes_attrs = filtered_nodes

    # Create all nodes
    for node, attrs in nodes_attrs.items():
        graph.add_node(node, **deepcopy(attrs))

    # Connect nodes with edges (also unpack edge params)
    for edge in all_edges:
        node_1, node_2 = edge[0], edge[1]
        edge_attrs = edge[2] if len(edge) == 3 else {}
        graph.add_edge(node_1, node_2, **edge_attrs)

    # Update attributes of edges
    if update_edge_attrs:
        # it will work in 2.x networkx only
        for edge, attr in update_edge_attrs.items():
            for k, v in attr.items():
                nx.set_edge_attributes(G=graph, name=k, values={edge: v})

    # Update attributes of nodes
    if update_nodes_attributes is not None:
        for node_name, new_attrs in update_nodes_attributes:
            assert (node_name in graph.nodes())
            for attr, value in new_attrs.items():
                graph.node[node_name][attr] = value

    for node_id in graph.nodes():
        node = Node(graph, node_id)
        check_and_update_ports(node, [
            graph.get_edge_data(edge[0], node_id)[0]
            for edge in graph.in_edges(node_id)
        ], True)
        check_and_update_ports(node, [
            graph.get_edge_data(node_id, edge[1])[0]
            for edge in graph.out_edges(node_id)
        ], False)

    for node in graph.get_op_nodes():
        # Add in_ports attribute
        in_edges = node.in_edges()
        for i in range(len(in_edges)):
            node.add_input_port(idx=i)

        # Add out_ports attribute
        out_edges = node.out_edges()
        for i in range(len(out_edges)):
            node.add_output_port(idx=i)
    return graph
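
The duplicate checks at the top rely on not_all_new; a hypothetical stand-in consistent with the error messages (the real helper may be implemented differently):

def not_all_new(existing, new):
    # true when any supposedly-new item is already present
    return any(item in existing for item in new)

assert not_all_new(['a', 'b'], ['b'])        # 'b' is not new -> error case
assert not not_all_new(['a', 'b'], ['c'])    # everything is new -> ok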
Example #29
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    VariadicSplit is inserted before nested networks.
    Outputs of the nested networks are concatenated with a Concat layer.

    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layers for parallel component layer
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug(
        'Model contains parallel component with {} nested networks'.format(
            nnet_count))

    split_points = []
    outputs = []
    inputs = []

    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        g = Graph()
        load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))

        # the input to nnet1 models is of rank 1, but we also insert the batch_size at the 0th axis;
        # the 1st axis contains the input_size of the nested subnetwork;
        # we split the input from the main network across the subnetworks
        input_node = Node(g, 'Parameter')
        split_points.append(input_node['shape'][1])
        g.remove_node(input_node.id)

        mapping = {
            node: graph.unique_id(node)
            for node in g.nodes(data=False) if node in graph
        }
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))

        outputs.append(Node(graph, sorted_nodes[-1]))
        inputs.append(Node(graph, sorted_nodes[0]))

    split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
    attrs = {
        'out_ports_count': nnet_count,
        'size_splits': split_points,
        'axis': 1,
        'name': split_id
    }
    variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
    prev_layer_node = Node(graph, prev_layer_id)
    prev_layer_node.add_output_port(0)
    graph.create_edge(
        prev_layer_node, variadic_split_node, 0, 0,
        create_edge_attrs(prev_layer_id, variadic_split_node.id,
                          prev_layer_id))

    concat_id = graph.unique_id(prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    concat_node = Node(graph, concat_id)

    # Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
    # and each subnetwork's output to concat_node
    for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
        output_node.add_output_port(0)
        concat_node.add_input_port(i)
        graph.create_edge(
            output_node, concat_node, 0, i,
            create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
        graph.create_edge(
            variadic_split_node, input_node, i, 0,
            create_edge_attrs(variadic_split_node.id, input_node.id,
                              variadic_split_node.id, 0, i))
    return concat_id