def _create_node(attrs: dict):
    pb = onnx.helper.make_node("Affine", ["X"], ["Y"], **attrs)
    graph = build_graph({'node_0': {'pb': pb}}, [])
    return Node(graph, 'node_0')
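
A hedged usage sketch (the 'alpha'/'beta' attribute values are illustrative; build_graph, Node and the onnx helper are assumed importable exactly as in the snippet above):

# Hypothetical: 'alpha' and 'beta' are attributes of the legacy ONNX Affine op.
node = _create_node({'alpha': 2.0, 'beta': 1.0})
assert node.pb.op_type == 'Affine'  # the NodeProto is stored under 'pb'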
Example #2
def op_type(graph, node_name: str):
    node = Node(graph, node_name)
    if node.has_valid('kind') and node['kind'] == 'op':
        return node['op']
    else:
        return None
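
A small usage sketch, assuming the build_graph test utility from the pre-2022 mo package (node names are hypothetical):

graph = build_graph({'relu_0': {'kind': 'op', 'op': 'ReLU'},
                     'relu_0_d': {'kind': 'data'}}, [])
assert op_type(graph, 'relu_0') == 'ReLU'
assert op_type(graph, 'relu_0_d') is None  # data nodes carry no 'op' attribute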
Example #3
    def test_infer_invalid4(self):
        graph = build_graph(nodes_attributes, edges1, inputs4_inv)
        ctcgreedydecoder_node = Node(graph, 'ctcgreedydecoder_node')
        self.assertRaises(AssertionError, CTCGreedyDecoderSeqLenOp.infer,
                          ctcgreedydecoder_node)
Example #4
    @staticmethod
    def replace_pattern(graph: Graph, match: dict):
        node = match['op']

        if node.name == 'iteration_number_out':
            return

        # calculate the length of the context at which the inference state becomes meaningful
        inputs = graph.get_op_nodes(op='Parameter')

        in_nodes = []
        for inp in inputs:
            for ins in inp.out_port(0).get_destinations():
                in_nodes.append(ins.node.name)

        context_len = 1
        try:
            subgraph = invert_sub_graph_between_nodes(
                graph, [node.in_port(0).get_source().node.name], in_nodes)
        except Error:
            return

        for n in subgraph:
            n_node = Node(graph, n)
            if n_node.kind == 'op' and n_node.op == 'Splice':
                context_len += len(n_node.context) - 1

        if context_len == 1:
            return

        in_node_port = node.in_port(0).get_source()
        in_node_shape = node.in_port(0).data.get_shape()
        node.in_port(0).disconnect()

        # add Select before saving state to avoid saving garbage
        select_node = Select(graph, {
            'name': 'select_' + node.name
        }).create_node()
        zero_else = Const(graph, {
            'name': 'zero_else',
            'value': np.zeros(in_node_shape)
        }).create_node()
        select_node.in_port(1).connect(in_node_port)
        select_node.in_port(2).connect(zero_else.out_port(0))

        # check whether an appropriate iteration counter already exists
        existing_counters = find_pattern_matches(
            graph,
            nodes=[('mem_in',
                    dict(op='Memory',
                         index=1,
                         shape=int64_array([context_len]))),
                   ('mem_in_data', dict()),
                   ('crop_mem_in',
                    dict(op='Crop',
                         axis=int64_array([1]),
                         offset=int64_array([1]),
                         dim=int64_array([context_len - 1]))),
                   ('crop_mem_in_data', dict()),
                   ('concat', dict(op='Concat', axis=1)),
                   ('concat_data', dict()), ('const_1', dict(op='Const')),
                   ('const_1_data', dict()),
                   ('mem_out',
                    dict(op='Memory',
                         index=0,
                         shape=int64_array([context_len]))),
                   ('crop_out',
                    dict(op='Crop',
                         axis=int64_array([1]),
                         offset=int64_array([0]),
                         dim=int64_array([1]))), ('crop_out_data', dict()),
                   ('select', dict(op='Select'))],
            edges=[('mem_in', 'mem_in_data'), ('mem_in_data', 'crop_mem_in'),
                   ('crop_mem_in', 'crop_mem_in_data'),
                   ('crop_mem_in_data', 'concat', {
                       'in': 0
                   }), ('const_1', 'const_1_data'),
                   ('const_1_data', 'concat', {
                       'in': 1
                   }), ('concat', 'concat_data'), ('concat_data', 'mem_out'),
                   ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'),
                   ('crop_out_data', 'select')])
        counter_match = next(existing_counters, None)
        if counter_match is not None:
            input_port = Node(
                graph,
                inverse_dict(counter_match)['crop_out']).out_port(0)
        else:
            mem_out = Memory(
                graph, {
                    'name': 'iteration_number',
                    'size': 2,
                    'index': 1,
                    'id': 'iteration_' + node.name,
                    'shape': int64_array([context_len]),
                    'dst_type': np.int32
                }).create_node()
            cut_first = Crop(
                graph, {
                    'name': 'cut_first',
                    'axis': int64_array([1]),
                    'offset': int64_array([1]),
                    'dim': int64_array([context_len - 1])
                }).create_node()
            cut_first.in_port(0).connect(mem_out.out_port(0))
            ones = Const(graph, {
                'name': 'ones',
                'value': np.ones([1, 1], dtype=np.int32)
            }).create_node()
            concat = Concat(graph, {
                'name': 'concat_ones',
                'in_ports_count': 2,
                'axis': 1
            }).create_node()
            concat.in_port(0).connect(cut_first.out_port(0))
            concat.in_port(1).connect(ones.out_port(0))
            mem_in = Memory(
                graph, {
                    'name': 'iteration_number_out',
                    'size': 2,
                    'index': 0,
                    'id': 'iteration_' + node.name,
                    'shape': int64_array([context_len])
                }).create_node()
            mem_in.in_port(0).connect(concat.out_port(0))
            res = Result(graph, {}).create_node()
            mem_in.out_port(0).connect(res.in_port(0))
            cut_last = Crop(
                graph, {
                    'name': 'cut_last',
                    'axis': int64_array([1]),
                    'offset': int64_array([0]),
                    'dim': int64_array([1])
                }).create_node()
            cut_last.in_port(0).connect(concat.out_port(0))
            input_port = cut_last.out_port(0)

        select_node.in_port(0).connect(input_port)
        select_node.out_port(0).connect(node.in_port(0))
        select_node.out_port(0).data.set_shape(in_node_shape)
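
The Select gating added above can be pictured in plain NumPy (a conceptual sketch, not MO code): until the iteration counter reports that the context is filled, zeros are written to the state instead of garbage.

import numpy as np

def select(cond, then_branch, else_branch):
    # semantics of the Select node inserted before the Memory state
    return np.where(cond, then_branch, else_branch)

state = np.array([[1.0, 2.0]])
context_filled = np.array([False])  # counter has not reached context_len yet
print(select(context_filled, state, np.zeros_like(state)))  # -> [[0. 0.]]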
Example #5
def copy_graph_with_ops(graph: Graph) -> Graph:
    """
    Function to copy graph and apply extenders to appropriate nodes
    :param graph: Graph to copy
    :return:Copied graph with applyed extenders
    """
    new_graph = Graph()
    new_graph.stage = 'back'
    new_graph.graph = graph.graph

    node_connections = dict()
    mapping_of_old_idx_into_new = dict()

    restore_correct_ports(graph)

    # Nodes preprocessing stage in source graph
    for op in graph.get_op_nodes():
        if op.soft_get('type') in preprocessing_op_nodes:
            preprocessing_op_nodes[op.type](op)

    # Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
    for op in graph.get_op_nodes():

        # Apply extenders to nodes in source graph
        if op.type in Extender.registered_ops:
            Extender.get_extender_class_by_name(op.type).extend(op)
        else:
            log.debug('Extender for node {} with type={} was not found.'.format(
                op.name, op.type))

        # Add node with necessary type and extended attrs in new graph
        op_type = op.soft_get('type_to_create', op.type)

        if op_type in custom_ops:
            node = custom_ops[op_type](new_graph, op.attrs()).create_node()
        else:
            assert op_type in Op.registered_ops, 'Operation {} not found in MO operations, ' \
                                                 'please check it!'.format(op_type)
            node = Op.get_op_class_by_name(op_type)(new_graph,
                                                    op.attrs()).create_node()

        # Collect node connections
        mapping_of_old_idx_into_new[op.id] = node.id
        node_connections[op.id] = collect_node_outputs(op)

    # Restore connections in new graph
    for input_node_idx, its_outputs in list(node_connections.items()):
        for out_port_idx, out_port_dest in its_outputs.items():
            for dest_in_port_idx, dest_node_idx in out_port_dest:
                src = Node(new_graph,
                           mapping_of_old_idx_into_new[input_node_idx])
                dst = Node(new_graph,
                           mapping_of_old_idx_into_new[dest_node_idx])
                src.out_port(out_port_idx).connect(
                    dst.in_port(dest_in_port_idx))

    # Nodes postprocessing stage in new graph
    for op in new_graph.get_op_nodes():
        if op.soft_get('type') in postprocessing_op_nodes:
            postprocessing_op_nodes[op.type](op)

    # clean up the graph and run shape inference
    new_graph.clean_up()

    return new_graph
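
A minimal usage sketch, assuming 'graph' was produced by the IR reader and the Extender/custom_ops registries referenced above are populated (pre-2022 mo layout):

# Hypothetical driver code around copy_graph_with_ops.
new_graph = copy_graph_with_ops(graph)
assert new_graph.stage == 'back'
assert len(new_graph.get_op_nodes()) > 0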
Example #6
def load_components(file_descr, graph, component_layer_map=None):
    num_components = collect_until_token_and_read(file_descr, b'<NumComponents>')
    log.debug('Network contains {} components'.format(num_components))
    is_nnet3 = component_layer_map is not None

    if not is_nnet3:
        collect_until_token(file_descr, b'<Components>')

    all_components = list()
    name = ""
    for _ in range(num_components):
        if is_nnet3:
            name = collect_until_token_and_read(file_descr, b'<ComponentName>', np.string_)

        component_type = find_next_component(file_descr)
        if component_type == end_of_nnet_tag.lower()[1:-1]:
            break

        start_index = file_descr.tell()
        end_tag, end_index = find_end_of_component(file_descr, component_type)
        # read dim info where possible to simplify shape calculation for MemoryOffset;
        # the shape for MemoryOffset cannot be derived from the previous layer because
        # the operation is split into 2 parts to remove a cycle from the graph
        file_descr.seek(start_index)
        dim = 0
        dim_words = {b'<Dim>', b'<InputDim>'}
        for dim_word in dim_words:
            try:
                collect_until_token(file_descr, dim_word, size_search_zone=end_index - start_index)
                cur_index = file_descr.tell()
                if start_index < cur_index < end_index:
                    dim = read_binary_integer32_token(file_descr)
                    break
                else:
                    file_descr.seek(start_index)
            except Error:
                file_descr.seek(start_index)

        if is_nnet3:
            if name in component_layer_map:
                layer_id = component_layer_map[name][0]
                for layer in component_layer_map[name]:
                    node = Node(graph, layer)
                    node['parameters'] = get_parameters(file_descr, start_index, end_index)
                    node['op'] = component_type
                    # propagate the dim info to MemoryOffset consumers to simplify their shape calculation
                    for o_n_name, params in node.get_outputs():
                        o_n = Node(graph, o_n_name)
                        if o_n['op'] == 'MemoryOffset' and dim != 0:
                            o_n['parameters']['element_size'] = dim
            else:
                raise Error("Something wrong with layer {}".format(name))
        else:
            layer_id = graph.unique_id(prefix=component_type)
            graph.add_node(layer_id,
                           parameters=get_parameters(file_descr, start_index, end_index),
                           op=component_type,
                           kind='op')

        all_components.append(layer_id)
        log.debug('{} (type is {}) was loaded'.format(layer_id, component_type))

    return all_components
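
The '<Dim>' scan above can be pictured with a plain BytesIO sketch (conceptual only; the real reader uses collect_until_token and binary token parsing):

from io import BytesIO

buf = BytesIO(b'<ComponentName> foo <Dim> 40 ...')
data = buf.read()
pos = data.find(b'<Dim>')
if pos != -1:
    dim = int(data[pos + len(b'<Dim>'):].split()[0])
    print(dim)  # -> 40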
Example #7
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    VariadicSplit is inserted before nested networks.
    Outputs of nested networks concatenate with layer Concat.

    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layers for parallel component layer
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug('Model contains parallel component with {} nested networks'.format(nnet_count))

    split_points = []
    outputs = []
    inputs = []

    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        g = Graph()
        load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))

        # the input to nnet1 models has rank 1, but we also insert batch_size at the 0th axis;
        # the 1st axis contains the input_size of the nested subnetwork;
        # we split the input from the main network into the subnetworks
        input_node = Node(g, 'Parameter')
        split_points.append(input_node['shape'][1])
        g.remove_node(input_node.id)

        mapping = {node: graph.unique_id(node) for node in g.nodes(data=False) if node in graph}
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))

        outputs.append(Node(graph, sorted_nodes[-1]))
        inputs.append(Node(graph, sorted_nodes[0]))

    split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
    attrs = {'out_ports_count': nnet_count, 'size_splits': split_points, 'axis': 1, 'name': split_id}
    variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
    prev_layer_node = Node(graph, prev_layer_id)
    prev_layer_node.add_output_port(0)
    graph.create_edge(prev_layer_node, variadic_split_node, 0, 0, create_edge_attrs(prev_layer_id, variadic_split_node.id, prev_layer_id))

    concat_id = graph.unique_id(prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    concat_node = Node(graph, concat_id)

    # Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
    # and each subnetwork's output to concat_node
    for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
        output_node.add_output_port(0)
        concat_node.add_input_port(i)
        graph.create_edge(output_node, concat_node, 0, i, create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
        graph.create_edge(variadic_split_node, input_node, i, 0, create_edge_attrs(variadic_split_node.id, input_node.id, variadic_split_node.id, 0, i))
    return concat_id
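
Conceptually, the split/concat wiring can be mirrored in NumPy (a sketch, not MO code): the input is split along axis 1 at the recorded split points, each chunk feeds one nested network, and the outputs are concatenated back along axis 1.

import numpy as np

x = np.arange(10.0).reshape(1, 10)
split_points = [4, 6]                      # input sizes of two nested nets
chunks = np.split(x, np.cumsum(split_points)[:-1], axis=1)
outs = [c * 2.0 for c in chunks]           # stand-ins for the nested nets
y = np.concatenate(outs, axis=1)           # what the Concat layer produces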
Example #8
    def create_node_with_data(self,
                              inputs: list = None,
                              attrs: dict = None,
                              data_nodes: [Node, np.ndarray, list] = None,
                              edge_attrs: list = None):
        """
        Creates a new node with given inputs and attrs and also creates data node that
        holds the op output value. Inputs should be data nodes (not op nodes).
        Work for ops with a single output port only.
        Edge attributes in edge_attrs go in order of items in 'inputs'
        """
        if inputs is None:
            inputs = []
        if attrs is None:
            attrs = {}
        # No need to extract port, because input node should be a data node,
        # so there is no choice.
        new_op_node = self.add_node(attrs)

        # TODO Preserve debug information
        inputs_with_edge_attrs = []
        for i, inp in enumerate(inputs):
            if inp is None:
                continue
            edge_attr = {'in': i}
            if edge_attrs is not None and i < len(edge_attrs):
                edge_attr.update(edge_attrs[i])
            inputs_with_edge_attrs.append((inp.id, new_op_node.id, edge_attr))
            new_op_node.add_input_port(i, skip_if_exist=True)

        self.graph.add_edges_from(inputs_with_edge_attrs)

        # TODO: Extend to the case of multiple output ports
        old_data_value = [None]
        old_data_shape = [None]
        if data_nodes is None:
            data_node = self.graph.unique_id()
            self.graph.add_node(
                data_node,
                **add_attrs_props(
                    dict(kind='data',
                         name=data_node,
                         value=None,
                         shape=None,
                         data_type=None,
                         infer=None)))
            data_nodes = [Node(self.graph, data_node)]
        else:
            if type(data_nodes) not in [list, np.ndarray]:
                data_nodes = [data_nodes]
            old_data_value = [
                data_node.value.copy()
                if data_node.has_valid('value') else None
                for data_node in data_nodes
            ]
            old_data_shape = [
                data_node.shape.copy()
                if data_node.has_valid('shape') else None
                for data_node in data_nodes
            ]
        for idx, data_node in enumerate(data_nodes):
            self.graph.add_edges_from([(new_op_node.id, data_node.id, {
                'out': idx
            })])

        if new_op_node.has_valid('infer'):
            if log.getLogger().isEnabledFor(log.DEBUG):
                log.debug(
                    'Start running infer function for individual op node with attributes: {}'
                    ''.format(str(new_op_node)))
            new_op_node.infer(new_op_node)
            if new_op_node.has('nchw_layout'):
                for out_node in new_op_node.out_nodes().values():
                    out_node['nchw_layout'] = new_op_node.nchw_layout
            assert all(
                old_value is None for old_value in old_data_value) or all([
                    strict_compare_tensors(old_data_value[idx], data_node.value)
                    for idx, data_node in enumerate(data_nodes)
                ])
            assert all(old_shape is None for old_shape in old_data_shape) or all(
                [strict_compare_tensors(old_data_shape[idx], data_node.shape)
                 for idx, data_node in enumerate(data_nodes)]), \
                "After re-inference of {} node, old and new shapes do not match. Old shapes: {}, new shapes: {}." \
                "".format(new_op_node.soft_get('name'), [old_data_shape[idx] for idx in range(len(data_nodes))],
                          [data_node.shape for data_node in data_nodes])
            for data_node in data_nodes:
                if log.getLogger().isEnabledFor(log.DEBUG):
                    log.debug(
                        'Finished running infer function, data nodes attributes: {}'
                        .format(data_node))
        return data_nodes[0] if len(data_nodes) == 1 else data_nodes
Example #9
    @classmethod
    def extract(cls, loop_node):
        Loop.update_node_stat(loop_node, {})
        loop_name = loop_node.soft_get('name', loop_node.id)

        # check that required body and condition functions exist in the graph library
        main_graph = loop_node.graph
        body_graph_name = loop_node.pb.attr['body'].func.name
        cond_graph_name = loop_node.pb.attr['cond'].func.name
        assert 'library' in main_graph.graph, 'The graph does not contain a library that is required ' \
                                              'by node with name "{}".'.format(loop_name)
        library_graph = main_graph.graph['library']

        assert body_graph_name in library_graph, 'The library does not contain a function with name "{}" ' \
                                                 'that is required by node ' \
                                                 'with name "{}".'.format(body_graph_name, loop_name)
        body_graph_proto = library_graph[body_graph_name]

        assert cond_graph_name in library_graph, 'The library does not contain a function with name "{}" ' \
                                                 'that is required by node ' \
                                                 'with name "{}".'.format(cond_graph_name, loop_name)
        cond_graph_proto = library_graph[cond_graph_name]

        body_graph = Graph()
        # fill the body graph
        for attr_key in main_graph.graph.keys():
            if attr_key != 'library':
                body_graph.graph[attr_key] = copy.deepcopy(
                    main_graph.graph[attr_key])
            else:
                # it is sufficient to have a link to the library
                body_graph.graph['library'] = main_graph.graph['library']
        loop_node['body'] = body_graph

        # create Parameter nodes for the body graph
        body_parameters = []
        body_parameter_names = []
        for idx, pb_node in enumerate(body_graph_proto['input_arg']):
            param_id = body_graph.unique_id(pb_node.name)
            body_graph.add_node(param_id,
                                name=param_id,
                                kind='op',
                                op='Parameter',
                                pb=None,
                                shape=None)
            parameter_node = Node(body_graph, pb_node.name)
            Parameter.update_node_stat(
                parameter_node, {
                    'data_type':
                    tf_dtype_extractor(pb_node.type),
                    'permute_attrs':
                    PermuteAttrs().update_attrs(attrs=[('shape', 'output:0')])
                })
            body_parameters.append(parameter_node)
            body_parameter_names.append(param_id)

        # update the loop body graph with the body function graph
        body_results = []
        update_body_graph(body_graph, body_graph_proto, body_parameter_names,
                          body_results)

        # update the loop body graph with the condition function graph
        update_body_graph(body_graph, cond_graph_proto, body_parameter_names,
                          body_results)

        # add 'internal_layer_id' attribute which is a must have attribute for the loop body node
        for idx, body_node in enumerate(body_graph.get_op_nodes()):
            body_node['internal_layer_id'] = idx

        body_graph.stage = 'front'

        # Currently,
        # Loop Inputs Order:
        #   0    - current iteration
        #   1    - trip count
        #   2..  - "loop carried" dependencies variables
        #
        # Body Inputs Order:
        #   0    - current iteration
        #   1    - trip count
        #   2..  - "loop carried" dependencies variables
        #
        # Body Outputs Order:
        #   0      - current iteration
        #   1      - trip count
        #   2..    - "loop carried" dependencies variables
        #
        # Loop Outputs Order:
        #   0    - current iteration
        #   1    - trip count
        #   2..  - "loop carried" dependencies variables
        #
        # so inputs must be reordered and execution condition must be created in the front transformation
        # to be aligned with the specification

        # connect external input ports with body parameter nodes except current iteration
        # since it must be disconnected from external port
        for idx in range(1, len(body_parameters)):
            Loop.connect_body_input(loop_node, idx, body_parameters[idx])

        # mark current iteration input Parameter node and execution condition Result node
        Loop.mark_current_iteration_parameter_node(loop_node,
                                                   body_parameters[0])
        Loop.mark_execution_condition_result_node(loop_node, body_results[-1])

        # connect back edges in the body except current iteration
        for idx in range(1, len(body_parameters)):
            Loop.add_back_edge(loop_node, body_parameters[idx],
                               body_results[idx])

        # connect body outputs with Loop operation output ports except the execution condition result
        for idx in range(len(body_results) - 1):
            Loop.connect_body_output(loop_node, idx, body_results[idx])

        # run function to parse body nodes attributes similar to the main graph
        extract_node_attrs(
            body_graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))
        return cls.enabled
Example #10
    def __load_xml(self):
        xml_tree = self.xml_tree or ET.parse(self.path_to_xml)
        xml_root = xml_tree.getroot()
        xml_layers = {}
        xml_edges = []
        statistics = {}

        Edge = namedtuple('edge',
                          ['from_layer', 'from_port', 'to_layer', 'to_port'])

        # Create graph with operations only
        self.graph = Graph()
        self.graph.graph['hashes'] = {}

        self.graph.graph['ir_version'] = int(
            xml_root.attrib['version']) if xml_root.attrib.get(
                'version') is not None else None
        self.graph.graph['layout'] = 'NCHW'
        self.graph.name = xml_root.attrib['name'] if xml_root.attrib.get(
            'name') is not None else None

        # Parse XML
        for child in xml_root:
            if child.tag == 'layers':
                for layer in child:
                    layer_id, layer_attrs = self.__load_layer(layer)
                    xml_layers.update({layer_id: layer_attrs})
            elif child.tag == 'edges':
                for edge in child:
                    xml_edges.append(
                        Edge(edge.attrib['from-layer'],
                             int(edge.attrib['from-port']),
                             edge.attrib['to-layer'],
                             int(edge.attrib['to-port'])))
            elif child.tag == 'statistics':
                layers = child.findall('layer')
                for layer in layers:
                    statistics[layer.find('name').text] = {
                        'min': layer.find('min').text,
                        'max': layer.find('max').text
                    }
            elif child.tag == 'meta_data':
                for elem in child:
                    if elem.tag == 'cli_parameters':
                        for det in elem:
                            if det.tag != 'unset':
                                value = det.attrib['value']
                                if value in ['True', 'False']:
                                    value = value == 'True'
                                self.meta_data[det.tag] = value
                            else:
                                self.meta_data[det.tag] = det.attrib[
                                    'unset_cli_parameters'].split(',_')
            elif child.tag == 'quantization_parameters':
                # Section with Post Optimization Toolkit parameters
                self.meta_data['quantization_parameters'] = dict()
                for elem in child:
                    if elem.tag == 'config':
                        self.meta_data['quantization_parameters'][
                            'config'] = elem.text
                    elif elem.tag in ['version', 'cli_params']:
                        self.meta_data['quantization_parameters'][
                            elem.tag] = elem.attrib['value']

        self.graph.graph['cmd_params'] = Namespace(
            **self.meta_data)  # TODO: check whether all these attrs are needed

        if len(statistics):
            self.graph.graph['statistics'] = statistics

        for layer in xml_layers.keys():
            self.graph.add_node(layer, **xml_layers[layer])

        xml_edges.sort(key=lambda x: x.to_layer)

        for edge in xml_edges:
            self.graph.add_edges_from([(edge.from_layer, edge.to_layer, {
                'from_port': edge.from_port,
                'to_port': edge.to_port
            })])

        # Insert data nodes between op nodes and insert data nodes with weights
        nodes = list(self.graph.nodes())
        for node in nodes:
            out_edges = Node(self.graph, node).get_outputs()
            data_nodes = {}
            for port in self.graph.node[node]['ports']:
                data = self.graph.unique_id(prefix='data_')
                self.graph.add_node(
                    data, **{
                        'kind': 'data',
                        'shape': self.graph.node[node]['ports'][port][0],
                        'value': None
                    })
                self.graph.add_edges_from([(node, data, {'out': port})])
                data_nodes.update({port: data})

            for out_node, edge_attrs in out_edges:
                self.graph.remove_edge(node, out_node)
                if edge_attrs['from_port'] in data_nodes:
                    data = data_nodes[edge_attrs['from_port']]
                else:
                    raise RuntimeError(
                        "Something is wrong with the IR! There is an edge from a non-existing port"
                    )
                self.graph.add_edges_from([(data, out_node, {
                    'in': edge_attrs['to_port']
                })])
Example #11
    def find_and_replace_pattern(self, graph: Graph):
        if graph.graph['layout'] != 'NHWC':
            # we check it here because this transformation is called explicitly from the pipeline
            return

        # reshape from 4D-5D -> ND. Insert Transpose(NC(D)HW->N(D)HWC) before Reshape
        for reinterp_shape_node_id in graph.get_nodes_with_attributes(reinterp_shape=True):
            reinterp_shape_node = Node(graph, reinterp_shape_node_id)
            assert 0 in reinterp_shape_node.in_nodes(), 'Node {} does not have 0 input. \n{}'.format(
                reinterp_shape_node_id, graph.dump_graph_for_graphviz())
            input_shape = reinterp_shape_node.in_node(0).shape
            if not is_input_data_in_correct_layout(reinterp_shape_node, 0) and len(input_shape) >= 4:
                order_const = Const(graph, {'value': PermuteAttrs().get_nchw_to_nhwc_permutation(len(input_shape)).perm
                                            }).create_node()
                permute_node = Transpose(graph,
                                         {'name': reinterp_shape_node.in_port(0).get_source().node.name + '/Transpose'
                                          }).create_node()
                reinterp_shape_node.in_port(0).get_connection().insert_node(permute_node)
                order_const.out_port(0).connect(permute_node.in_port(1))
                order_const.infer(order_const)

                # do not infer the Transpose node because it should have an input data node in NCHW layout (but currently
                # it is NHWC because the data node attributes have not been permuted yet) and produce output in NHWC layout
                # (which is true at this moment)
                permute_node['need_shape_inference'] = False
                # mark the Transpose output data node as having the correct layout so its shape will not be permuted
                mark_output_as_in_correct_layout(permute_node, 0)

                # keep the reinterp_shape_node in NHWC layout
                mark_input_as_in_correct_layout(reinterp_shape_node, 0)
                mark_input_as_in_correct_layout(reinterp_shape_node, 1)

        # reshape from ND -> 4D-5D. Insert Transpose(N(D)HWC->NC(D)HW) after Reshape
        for reinterp_shape_node_id in graph.get_nodes_with_attributes(reinterp_shape=True):
            reinterp_shape_node = Node(graph, reinterp_shape_node_id)
            assert 0 in reinterp_shape_node.out_nodes(), 'Node {} does not have 0 output. \n{}'.format(
                reinterp_shape_node_id, graph.dump_graph_for_graphviz())
            output_shape = reinterp_shape_node.out_node(0).shape
            if not is_output_data_in_correct_layout(reinterp_shape_node, 0) and len(output_shape) >= 4:
                order_const = Const(graph, {
                    'value': PermuteAttrs().get_nhwc_to_nchw_permutation(len(output_shape)).perm}).create_node()
                permute_node = Transpose(graph, {'name': reinterp_shape_node.id + '/Transpose'}).create_node()
                reinterp_shape_node.out_port(0).get_connection().insert_node(permute_node)
                order_const.out_port(0).connect(permute_node.in_port(1))

                # the Reshape and Transpose operations should work in original (NHWC layout) so the Transpose
                # will convert it to the NCHW
                mark_input_as_in_correct_layout(permute_node, 0)
                mark_input_as_in_correct_layout(permute_node, 1)
                # do not set Transpose output data node 'correct_data_layout' attribute so the data node shape will be
                # permuted

                # keep the reinterp_shape_node in NHWC layout
                mark_output_as_in_correct_layout(reinterp_shape_node, 0)
                mark_input_as_in_correct_layout(reinterp_shape_node, 1)

                # do not re-infer the Transpose node because its output data node should be in NHWC layout to keep the
                # rest of the graph consistent
                permute_node['need_shape_inference'] = False

        # TODO remove the following line when the unified pipeline is used for back transformations
        graph_clean_up_tf(graph)
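
The failure mode this pass prevents can be reproduced in NumPy (a conceptual sketch): reshaping NHWC data with a target shape computed for NCHW scrambles the channels unless a transpose is applied first, which is exactly what the inserted Transpose nodes do.

import numpy as np

nhwc = np.random.rand(1, 4, 4, 3)      # framework data in NHWC layout
nchw_target = (1, 3, 16)               # Reshape target computed for NCHW
wrong = nhwc.reshape(nchw_target)      # silently mixes H, W and C values
right = nhwc.transpose(0, 3, 1, 2).reshape(nchw_target)  # NCHW first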
Example #12
    def __load_layer(self, layer):
        """
            Layer example

            <layer id="1" name="862" precision="FP32" type="Convolution">
                <data dilation-x="1" dilation-y="1" group="1" kernel-x="1" kernel-y="5" output="32" pad-b="0" pad-r="2" pad-x="2" pad-y="0" stride-x="1" stride-y="1"/>
                <input>
                    <port id="0">
                        <dim>1</dim>
                        <dim>3</dim>
                        <dim>32</dim>
                        <dim>32</dim>
                    </port>
                </input>
                <output>
                    <port id="3">
                        <dim>1</dim>
                        <dim>32</dim>
                        <dim>32</dim>
                        <dim>32</dim>
                    </port>
                </output>
                <blobs>
                    <weights offset="0" size="1920"/>
                    <biases offset="1920" size="128"/>
                </blobs>
            </layer>

        """

        layer_id = layer.attrib['id']

        layer_attrs = layer.attrib
        layer_attrs.update({'ports': {}, 'kind': 'op'})

        inputs_counter = 0

        for attr in layer:
            if attr.tag == 'data':
                new_attrs = self.__normalize_attrs(attr.attrib)
                if layer.attrib['type'] == 'Const':
                    assert 'offset' in new_attrs and 'size' in new_attrs, \
                        'Incorrect attributes for Const layer, {} instead of {}!'.format(new_attrs.keys(), ['offset', 'size'])
                    new_attrs.update(
                        self.__prepare_bin_attrs(
                            layer, 0, 'custom', new_attrs['offset'],
                            new_attrs['size'],
                            layer[1][0].attrib['precision']))
                layer_attrs.update(new_attrs)
            elif attr.tag == 'input':
                inputs_counter = len(attr)
            elif attr.tag == 'output':
                output = attr
                for port in output:
                    port_id = int(port.attrib['id'])
                    output_shape = []
                    for dim in port:
                        output_shape.append(int(dim.text))

                    out_tensor_names = None
                    if 'names' in port.attrib:
                        out_tensor_names = port.attrib['names']

                    layer_attrs['ports'].update(
                        {port_id: (output_shape, out_tensor_names)})
            elif attr.tag == 'blobs':
                in_port = inputs_counter
                for blob_attr in attr:
                    layer_attrs.update(
                        self.__prepare_bin_attrs(
                            layer, in_port, blob_attr.tag,
                            blob_attr.attrib['offset'],
                            blob_attr.attrib['size'],
                            blob_attr.attrib.get('precision', None)))
                    in_port += 1
            elif attr.tag == 'body':
                xml_body_child = list(layer.iterfind('body'))
                assert len(xml_body_child) == 1

                body_ir = IREngine(path_to_xml=None,
                                   path_to_bin=self.path_to_bin,
                                   xml_tree=ET.ElementTree(xml_body_child[0]))
                self.graph.graph['hashes'].update(
                    body_ir.graph.graph['hashes'])

                # Find port_map section and take an input_port_map & output_port_map
                xml_port_map = list(layer.iterfind('port_map'))
                if len(xml_port_map) != 1:
                    log.warning(
                        "TensorIterator body won't be compared due to missing port_map section!"
                    )
                    continue
                xml_port_map = xml_port_map[0]

                input_layers = []
                input_port_map = []
                output_port_map = []

                for port in xml_port_map:
                    if port.tag == 'input':
                        if 'internal_layer_id' not in port.attrib:
                            log.warning(
                                "internal_layer_id attrib not found in input section"
                            )
                        else:
                            input_layers.append(
                                Node(body_ir.graph,
                                     port.attrib['internal_layer_id']))
                            input_port_map.append(
                                self.__normalize_attrs(port.attrib))
                    elif port.tag == 'output':
                        if 'internal_layer_id' not in port.attrib:
                            log.warning(
                                "internal_layer_id attrib not found in output section"
                            )
                        else:
                            output_port_map.append(
                                self.__normalize_attrs(port.attrib))

                body_ir.input_node = input_layers[0]
                layer_attrs.update({'body': body_ir})
                layer_attrs.update({'input_port_map': input_port_map})
                layer_attrs.update({'output_port_map': output_port_map})

                xml_back_edges_map = list(layer.iterfind('back_edges'))
                if len(xml_back_edges_map) != 1:
                    log.warning(
                        "TensorIterator body won't be compared due to missing back_edges section!"
                    )
                    continue
                xml_back_edges_map = xml_back_edges_map[0]

                back_edges = []

                for edge in xml_back_edges_map:
                    back_edges.append(self.__normalize_attrs(edge.attrib))

                layer_attrs.update({'back_edges': back_edges})

        return layer_id, layer_attrs
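
The output-port parsing above can be exercised standalone with ElementTree (a sketch; the XML mirrors the layer example from the docstring):

import xml.etree.ElementTree as ET

layer = ET.fromstring('<layer id="1" type="Convolution">'
                      '<output><port id="3"><dim>1</dim><dim>32</dim>'
                      '</port></output></layer>')
for port in layer.find('output'):
    dims = [int(d.text) for d in port]
    print(port.attrib['id'], dims)  # -> 3 [1, 32]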
Example #13
    def test_1(self):
        graph = build_graph(nodes_attributes,
                            [('placeholder', 'shuffle_channel'),
                             ('shuffle_channel', 'result')],
                            nodes_with_edges_only=True)
        graph.graph['layout'] = 'NCHW'
        graph.stage = 'front'

        ref_graph = build_graph(nodes_attributes, [('placeholder', 'shape', {
            'in': 0,
            'out': 0
        }), ('shape', 'batch_gather', {
            'in': 0,
            'out': 0
        }), ('batch_gather_idx', 'batch_gather', {
            'in': 1,
            'out': 0
        }), ('batch_gather_axis', 'batch_gather', {
            'in': 2,
            'out': 0
        }), ('shape', 'channel_gather', {
            'in': 0,
            'out': 0
        }), ('channel_gather_idx', 'channel_gather', {
            'in': 1,
            'out': 0
        }), ('channel_gather_axis', 'channel_gather', {
            'in': 2,
            'out': 0
        }), ('channel_gather', 'output_channels', {
            'in': 0,
            'out': 0
        }), ('div_group', 'output_channels', {
            'in': 1,
            'out': 0
        }), ('output_channels', 'convert', {
            'in': 0,
            'out': 0
        }), ('batch_gather', 'concat', {
            'in': 0,
            'out': 0
        }), ('group', 'concat', {
            'in': 1,
            'out': 0
        }), ('convert', 'concat', {
            'in': 2,
            'out': 0
        }), ('const', 'concat', {
            'in': 3,
            'out': 0
        }), ('placeholder', 'reshape_split', {
            'in': 0,
            'out': 0
        }), ('concat', 'reshape_split', {
            'in': 1,
            'out': 0
        }), ('reshape_split', 'transpose', {
            'in': 0,
            'out': 0
        }), ('transpose_const', 'transpose', {
            'in': 1,
            'out': 0
        }), ('transpose', 'reshape_concat', {
            'in': 0,
            'out': 0
        }), ('shape', 'reshape_concat', {
            'in': 1,
            'out': 0
        }), ('reshape_concat', 'result')],
                                nodes_with_edges_only=True)

        ShuffleChannel().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph,
                                      ref_graph,
                                      'result',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.assertTrue(
            Node(graph, 'result').in_port(0).get_source().node.name ==
            'scname')
Example #14
    def test_splice_with_constdim(self):
        graph = build_graph(self.nodes_attributes,
                            [('placeholder', 'in_node'), ('in_node', 'splice'),
                             ('splice', 'splice_data'),
                             ('splice_data', 'out_placeholder')])
        Node(graph, 'splice')['const_dim'] = 10
        Node(graph, 'splice_data')['shape'] = [1, 43]
        ReplaceSpliceNodePattern().find_and_replace_pattern(graph)

        ref_graph = build_graph(
            {
                'in_placeholder': {
                    'kind': 'op',
                    'op': None
                },
                'in_node': {
                    'kind': 'data',
                    'shape': [1, 13]
                },
                'split': {
                    'kind': 'op',
                    'op': 'Split'
                },
                'split_data_0': {
                    'kind': 'data'
                },
                'split_data_1': {
                    'kind': 'data'
                },
                'shape': {
                    'kind': 'op',
                    'op': 'ShapeOf'
                },
                'shape_data': {
                    'kind': 'data'
                },
                'crop_batch': {
                    'kind': 'op',
                    'op': 'Crop',
                    'offset': int64_array([0])
                },
                'crop_batch_data': {
                    'kind': 'data'
                },
                'crop_batch_dim': {
                    'kind': 'op',
                    'op': 'Const',
                    'value': int64_array([1])
                },
                'crop_batch_dim_data': {
                    'kind': 'data'
                },
                'second_dim': {
                    'kind': 'op',
                    'op': 'Const',
                    'value': int64_array([33])
                },
                'second_dim_data': {
                    'kind': 'data'
                },
                'gather_shape': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'gather_shape_data': {
                    'kind': 'data'
                },
                'fill_value': {
                    'kind': 'op',
                    'op': 'Const',
                    'value': int64_array([0])
                },
                'fill_value_data': {
                    'kind': 'data'
                },
                'broadcast': {
                    'kind': 'op',
                    'op': 'Broadcast'
                },
                'broadcast_data': {
                    'kind': 'data'
                },
                'memory_in': {
                    'kind': 'op',
                    'op': 'ReadValue'
                },
                'memory_in_data': {
                    'kind': 'data'
                },
                'crop_mem': {
                    'kind': 'op',
                    'op': 'Crop',
                    'offset': 3,
                    'dim': 30
                },
                'crop_mem_data': {
                    'kind': 'data'
                },
                'concat': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'concat_data': {
                    'kind': 'data'
                },
                'memory_out': {
                    'kind': 'op',
                    'op': 'Assign'
                },
                'memory_out_data': {
                    'kind': 'data'
                },
                'result': {
                    'kind': 'op',
                    'op': 'Result'
                },
                'shape_2': {
                    'kind': 'op',
                    'op': 'ShapeOf'
                },
                'shape_2_data': {
                    'kind': 'data'
                },
                'crop_batch_2': {
                    'kind': 'op',
                    'op': 'Crop',
                    'offset': int64_array([0])
                },
                'crop_batch_2_data': {
                    'kind': 'data'
                },
                'crop_batch_dim_2': {
                    'kind': 'op',
                    'op': 'Const',
                    'value': int64_array([1])
                },
                'crop_batch_dim_2_data': {
                    'kind': 'data'
                },
                'second_dim_2': {
                    'kind': 'op',
                    'op': 'Const',
                    'value': int64_array([33])
                },
                'second_dim_2_data': {
                    'kind': 'data'
                },
                'gather_shape_2': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'gather_shape_2_data': {
                    'kind': 'data'
                },
                'fill_value_2': {
                    'kind': 'op',
                    'op': 'Const',
                    'value': int64_array([0])
                },
                'fill_value_2_data': {
                    'kind': 'data'
                },
                'broadcast_2': {
                    'kind': 'op',
                    'op': 'Broadcast'
                },
                'broadcast_2_data': {
                    'kind': 'data'
                },
                'memory_in_constdims': {
                    'kind': 'op',
                    'op': 'ReadValue'
                },
                'memory_in_constdims_data': {
                    'kind': 'data'
                },
                'crop_mem_constdims': {
                    'kind': 'op',
                    'op': 'Crop',
                    'offset': 10,
                    'dim': 100
                },
                'crop_mem_constdims_data': {
                    'kind': 'data'
                },
                'concat_constdims': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'concat_constdims_data': {
                    'kind': 'data'
                },
                'memory_out_constdims': {
                    'kind': 'op',
                    'op': 'Assign'
                },
                'memory_out_constdims_data': {
                    'kind': 'data'
                },
                'result_constdims': {
                    'kind': 'op',
                    'op': 'Result'
                },
                'crop_first_constdims': {
                    'kind': 'op',
                    'op': 'Crop',
                    'offset': 0,
                    'dim': 10
                },
                'crop_first_constdims_data': {
                    'kind': 'data'
                },
                'concat_all': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'concat_all_data': {
                    'kind': 'data',
                    'shape': [1, 43]
                },
                'out_placeholder': {
                    'kind': 'op',
                    'op': 'placeholder'
                },
                'axis_const': {
                    'kind': 'op'
                },
                'axis_const_data': {
                    'value': None,
                    'shape': None,
                    'kind': 'data'
                },
                'split_dim_const': {
                    'kind': 'op'
                },
                'split_dim_const_data': {
                    'value': None,
                    'shape': None,
                    'kind': 'data'
                },
            }, [
                ('in_placeholder', 'in_node'),
                ('in_node', 'split', {
                    'in': 0
                }),
                ('split', 'split_data_0', {
                    'out': 0
                }),
                ('split', 'split_data_1', {
                    'out': 1
                }),
                ('split_data_0', 'shape'),
                ('shape', 'shape_data'),
                ('shape_data', 'crop_batch'),
                ('crop_batch', 'crop_batch_data'),
                ('crop_batch_dim', 'crop_batch_dim_data'),
                ('crop_batch_dim_data', 'crop_batch', {
                    'in': 1
                }),
                ('second_dim', 'second_dim_data'),
                ('second_dim_data', 'gather_shape', {
                    'in': 1
                }),
                ('crop_batch_data', 'gather_shape', {
                    'in': 0
                }),
                ('gather_shape', 'gather_shape_data'),
                ('fill_value', 'fill_value_data'),
                ('fill_value_data', 'broadcast', {
                    'in': 0
                }),
                ('gather_shape_data', 'broadcast', {
                    'in': 1
                }),
                ('broadcast', 'broadcast_data'),
                ('broadcast_data', 'memory_in'),
                ('memory_in', 'memory_in_data'),
                ('memory_in_data', 'crop_mem'),
                ('crop_mem', 'crop_mem_data'),
                ('crop_mem_data', 'concat', {
                    'in': 0
                }),
                ('split_data_0', 'concat', {
                    'in': 1
                }),
                ('concat', 'concat_data'),
                ('concat_data', 'memory_out'),
                ('memory_out', 'memory_out_data'),
                ('memory_out_data', 'result'),
                ('split_data_1', 'shape_2'),
                ('shape_2', 'shape_2_data'),
                ('shape_2_data', 'crop_batch_2'),
                ('crop_batch_2', 'crop_batch_2_data'),
                ('crop_batch_dim_2', 'crop_batch_dim_2_data'),
                ('crop_batch_dim_2_data', 'crop_batch_2', {
                    'in': 1
                }),
                ('second_dim_2', 'second_dim_2_data'),
                ('second_dim_2_data', 'gather_shape_2', {
                    'in': 1
                }),
                ('crop_batch_2_data', 'gather_shape_2', {
                    'in': 0
                }),
                ('gather_shape_2', 'gather_shape_2_data'),
                ('fill_value_2', 'fill_value_2_data'),
                ('fill_value_2_data', 'broadcast_2', {
                    'in': 0
                }),
                ('gather_shape_2_data', 'broadcast_2', {
                    'in': 1
                }),
                ('broadcast_2', 'broadcast_2_data'),
                ('broadcast_2_data', 'memory_in_constdims'),
                ('memory_in_constdims', 'memory_in_constdims_data'),
                ('memory_in_constdims_data', 'crop_mem_constdims'),
                ('crop_mem_constdims', 'crop_mem_constdims_data'),
                ('crop_mem_constdims_data', 'concat_constdims', {
                    'in': 0
                }),
                ('split_data_1', 'concat_constdims', {
                    'in': 1
                }),
                ('concat_constdims', 'concat_constdims_data'),
                ('concat_constdims_data', 'memory_out_constdims'),
                ('memory_out_constdims', 'memory_out_constdims_data'),
                ('memory_out_constdims_data', 'result_constdims'),
                ('concat_constdims_data', 'crop_first_constdims'),
                ('crop_first_constdims', 'crop_first_constdims_data'),
                ('crop_first_constdims_data', 'concat_all', {
                    'in': 1
                }),
                ('concat_data', 'concat_all', {
                    'in': 0
                }),
                ('concat_all', 'concat_all_data'),
                ('concat_all_data', 'out_placeholder'),
                ('axis_const', 'axis_const_data'),
                ('split_dim_const', 'split_dim_const_data'),
                ('axis_const_data', 'split', {
                    'in': 1
                }),
                ('split_dim_const_data', 'split', {
                    'in': 2
                }),
            ])

        (flag, resp) = compare_graphs(graph, ref_graph, 'out_placeholder')
        self.assertTrue(flag, resp)
Example #15
    def test_remove_noop_nodes_check_out_port(self):
        graph = build_graph(
            {
                'input': {
                    'type': 'Placeholder',
                    'value': None,
                    'kind': 'op'
                },
                'noop': {
                    'type': 'NoOp',
                    'value': None,
                    'kind': 'op'
                },
                'output_1': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_2': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_3': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
            }, [('input', 'noop'), ('noop', 'output_1', {
                'in': 4,
                'out': 1
            }), ('noop', 'output_2', {
                'in': 2,
                'out': 1
            }), ('noop', 'output_3', {
                'in': 10,
                'out': 1
            })])

        ref_graph = build_graph(
            {
                'input': {
                    'type': 'Placeholder',
                    'value': None,
                    'kind': 'op'
                },
                'output_1': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_2': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_3': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
            }, [('input', 'output_1', {
                'in': 4,
                'out': 0
            }), ('input', 'output_2', {
                'in': 2,
                'out': 0
            }), ('input', 'output_3', {
                'in': 10,
                'out': 0
            })],
            nodes_with_edges_only=True)

        erase_node(Node(graph, 'noop'))

        compare_graphs(graph, ref_graph, 'output_1')
Example #16
def sub_graph_between_nodes(graph: Graph,
                            start_nodes: list,
                            end_nodes: list,
                            detect_extra_start_node: callable = None):
    """
    Finds nodes of the sub-graph between 'start_nodes' and 'end_nodes'. Input nodes for the sub-graph nodes are also
    added to the sub-graph. Constant inputs of the 'start_nodes' are not added to the sub-graph.
    :param graph: graph to operate on.
    :param start_nodes: list of node names that specifies start nodes.
    :param end_nodes: list of node names that specifies end nodes.
    :param detect_extra_start_node: optional predicate marking a node as an extra start node whose inputs should not
    be traversed.
    :return: list of nodes of the identified sub-graph; a tuple of that list and the extra start nodes when
    'detect_extra_start_node' is provided. Raises an Error if the sub-graph cannot be extracted.
    """
    sub_graph_nodes = list()
    visited = set(start_nodes)
    d = deque(start_nodes)
    extra_start_nodes = []

    nx.set_node_attributes(G=graph, name='prev', values=None)
    while len(d) != 0:
        cur_node_name = d.popleft()
        sub_graph_nodes.append(cur_node_name)
        if cur_node_name not in end_nodes:  # do not add output nodes of the end_nodes
            for _, dst_node_name in graph.out_edges(cur_node_name):
                if dst_node_name not in visited:
                    d.append(dst_node_name)
                    visited.add(dst_node_name)
                    graph.node[dst_node_name]['prev'] = cur_node_name

        for src_node_name, _ in graph.in_edges(cur_node_name):
            # add input nodes for the non-start_nodes
            if cur_node_name not in start_nodes and src_node_name not in visited:
                if detect_extra_start_node is not None and detect_extra_start_node(
                        Node(graph, cur_node_name)):
                    extra_start_nodes.append(cur_node_name)
                else:
                    d.append(src_node_name)
                    graph.node[src_node_name]['prev'] = cur_node_name
                    visited.add(src_node_name)

    # use forward dfs to check that all end nodes are reachable from at least one of the start nodes
    forward_visited = set()
    for start_node in start_nodes:
        graph.dfs(start_node, forward_visited)
    for end_node in end_nodes:
        if end_node not in forward_visited:
            raise Error('End node "{}" is not reachable from start nodes: {}. '
                        .format(end_node, start_nodes) + refer_to_faq_msg(74))

    for node_name in sub_graph_nodes:
        # the sub-graph should not contain network input (Parameter) nodes
        if graph.node[node_name].get('op', '') == 'Parameter':
            path = list()
            cur_node = node_name
            while cur_node and 'prev' in graph.node[cur_node]:
                path.append(str(cur_node))
                cur_node = graph.node[cur_node]['prev']
            log.debug("The path from input node is the following: {}".format(
                '\n'.join(path)))
            raise Error(
                'The matched sub-graph contains network input node "{}". '.
                format(node_name) + refer_to_faq_msg(75))
    if detect_extra_start_node is None:
        return sub_graph_nodes
    else:
        return sub_graph_nodes, extra_start_nodes
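
A minimal usage sketch of sub_graph_between_nodes (hypothetical node names and ops; assumes the Model Optimizer's Graph class, which extends networkx.MultiDiGraph, is importable):

from mo.graph.graph import Graph

graph = Graph()
graph.add_nodes_from([
    ('a', {'kind': 'op', 'op': 'Parameter'}),
    ('b', {'kind': 'op', 'op': 'Identity'}),
    ('c', {'kind': 'op', 'op': 'Identity'}),
])
graph.add_edges_from([('a', 'b'), ('b', 'c')])

# Only the segment between the start and end nodes is returned; inputs of
# the start node 'b' (here the Parameter 'a') are not pulled in.
print(sub_graph_between_nodes(graph, ['b'], ['c']))  # ['b', 'c']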
Example #17
    def test_replace_node_several_consumers(self):
        graph = build_graph(
            {
                'input_1': {
                    'type': 'Placeholder',
                    'value': None,
                    'kind': 'op'
                },
                'input_2': {
                    'type': 'Placeholder',
                    'value': None,
                    'kind': 'op'
                },
                'old': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_1': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_2': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
                'output_3': {
                    'type': 'Identity',
                    'value': None,
                    'kind': 'op'
                },
            }, [
                ('input_1', 'old'),
                ('input_2', 'old'),
                ('old', 'output_3'),
                ('old', 'output_2'),
                ('old', 'output_1'),
            ])

        new_node = Const(graph, {
            'name': 'new'
        }).create_node([Node(graph, 'input_1'),
                        Node(graph, 'input_2')])
        replace_node(Node(graph, 'old'), new_node)

        self.assertEqual(len(graph.nodes()), 6)
        self.assertEqual(len(graph.edges()), 5)
        self.assertListEqual(sorted(graph.out_edges('new')),
                             [('new', 'output_1'), ('new', 'output_2'),
                              ('new', 'output_3')])
        expected_result = [('new', 'output_1', {
            'in': 0,
            'out': 2,
            'name': 'old'
        }), ('new', 'output_2', {
            'in': 0,
            'out': 1,
            'name': 'old'
        }), ('new', 'output_3', {
            'in': 0,
            'out': 0,
            'name': 'old'
        })]
        self.assertListEqual(sorted(graph.out_edges('new', data=True)),
                             expected_result)
Example #18
 def test_find_input(self):
     # Create references for this test:
     ref_nodes = [Node(self.IR.graph, '0')]
     # Check function:
     a = IREngine._IREngine__find_input(self.IR.graph)
     self.assertTrue(a == ref_nodes, 'Error')
Example #19
def read_node(file_descr, graph, component_layer_map, layer_node_map):
    s = file_descr.readline()
    if s == b'\n':
        return False
    tokens = s.split(b' ')
    if tokens[0] == b'input-node':
        in_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        in_name = str(in_name).strip('b').replace('\'', "")
        in_shape = np.array([1, int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0])], dtype=np.int64)

        if in_name not in layer_node_map:
            graph.add_node(in_name, name=in_name, kind='op', op='Parameter', parameters=None, shape=in_shape)
            layer_node_map[in_name] = in_name
        else:
            Node(graph, in_name)['op'] = 'Parameter'
            Node(graph, in_name)['shape'] = in_shape
    elif tokens[0] == b'component-node':
        layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        layer_name = str(layer_name).strip('b').replace('\'', "")

        component_name = s[s.find(b'component=') + len(b'component='):].split(b' ')[0]
        if layer_name not in layer_node_map:
            node_name = graph.unique_id(prefix=layer_name)
            graph.add_node(node_name,
                           parameters=None,
                           op=None,
                           kind='op')
            layer_node_map[layer_name] = node_name
        else:
            node_name = layer_node_map[layer_name]

        if component_name in component_layer_map:
            component_layer_map[component_name].append(node_name)
        else:
            component_layer_map[component_name] = [node_name]

        # parse input
        in_node_id = parse_input_for_node(s[s.find(b'input=') + 6:], graph, layer_node_map)
        out_port = len(Node(graph, in_node_id).out_nodes())
        in_port = len(Node(graph, node_name).in_nodes())

        Node(graph, node_name).add_input_port(in_port)
        Node(graph, in_node_id).add_output_port(out_port)

        graph.add_edge(in_node_id, node_name, **create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))
    elif tokens[0] == b'output-node':
        layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        layer_name = str(layer_name).strip('b').replace('\'', "")
        node_name = graph.unique_id(prefix=layer_name)
        graph.add_node(node_name,
                       parameters=None,
                       op='Identity',
                       kind='op')
        out_name = graph.unique_id(prefix=node_name + "_out")
        graph.add_node(out_name,
                       parameters=None,
                       op='Result',
                       kind='op')
        Node(graph, node_name).add_input_port(0)
        Node(graph, node_name).add_output_port(0)
        Node(graph, out_name).add_input_port(0)
        graph.add_edge(node_name, out_name, **create_edge_attrs(node_name, out_name, node_name))

        # parse input
        in_node_id = parse_input_for_node(s[s.find(b'input=') + len(b'input='):], graph, layer_node_map)

        out_port = len(Node(graph, in_node_id).out_nodes())
        Node(graph, in_node_id).add_output_port(out_port)
        graph.create_edge(Node(graph, in_node_id), Node(graph, node_name), out_port, 0, create_edge_attrs(in_node_id, node_name, in_node_id, 0, out_port))

        objective_type = s[s.find(b'objective=') + 10:].split(b' ')[0].split(b'\n')[0]
        if objective_type != b'linear':
            raise Error("Unsupported objective-type for output {}".format(node_name))
    elif tokens[0] == b'dim-range-node':
        layer_name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
        layer_name = str(layer_name).strip('b').replace('\'', "")
        offset = int(s[s.find(b'dim-offset=') + len(b'dim-offset='):].split(b' ')[0])
        dim = int(s[s.find(b'dim=') + len(b'dim='):].split(b' ')[0])

        if layer_name in layer_node_map:
            node_name = layer_node_map[layer_name]
            node = Node(graph, node_name)
            node['parameters'] = {'offset': np.array([offset]), 'dim': np.array([dim]), 'axis': np.array([1])}
            node['op'] = 'Crop'
        else:
            node_name = graph.unique_id(prefix=layer_name)
            graph.add_node(node_name,
                           parameters={'offset': np.array([offset]), 'dim': np.array([dim]), 'axis': np.array([1])},
                           op='Crop',
                           kind='op')
            layer_node_map[layer_name] = node_name
            node = Node(graph, node_name)

        in_node_id = parse_input_for_node(s[s.find(b'input-node=') + len(b'input-node='):], graph, layer_node_map)
        out_port = len(Node(graph, in_node_id).out_nodes())
        in_port = len(Node(graph, node_name).in_nodes())

        node.add_input_port(in_port)
        Node(graph, in_node_id).add_output_port(out_port)

        graph.create_edge(Node(graph, in_node_id), node, out_port, in_port, create_edge_attrs(in_node_id, node_name, in_node_id, in_port, out_port))

        # read dim info where possible to simplify shape calculation for MemoryOffset
        # shape calculation for MemoryOffset can't be done through shape of previous layer because
        # it is separated in 2 parts to remove cycle from graph
        for o_n_name, params in node.get_outputs():
            o_n = Node(graph, o_n_name)
            if o_n['op'] == 'MemoryOffset':
                o_n['parameters']['element_size'] = dim
    else:
        raise Error("Unsupported node specifier {}".format(tokens[0]))
    return True
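
read_node extracts every attribute with the same byte-slicing idiom: find the b'key=' marker, skip past its length, and cut the value at the next space. A standalone illustration (the sample line is made up):

s = b'component-node name=tdnn1.affine component=tdnn1.affine input=lda\n'

name = s[s.find(b'name=') + len(b'name='):].split(b' ')[0]
component = s[s.find(b'component=') + len(b'component='):].split(b' ')[0]

print(name)       # b'tdnn1.affine'
print(component)  # b'tdnn1.affine'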
Example #20
    def find_and_replace_pattern(self, graph: Graph):
        for node in list(graph.nodes()):
            node = Node(graph, node)
            # Check that node layout mismatch with graph layout
            # For example: NHWC and NCHW or NCDHW and NDHWC
            if node.kind == 'op' and node.has_valid(
                    'layout') and node.layout != indices_mapping[len(
                        node.layout)][graph.graph['layout']]:
                input = node.in_node()
                output = node.out_node()

                # Calculate permutation for further Permute operations
                if graph.graph['layout'] == 'NCHW':
                    # if Node has NCHW and graph has NHWC layout
                    permutation = PermuteAttrs.get_nhwc_to_nchw_permutation(
                        len(node.layout))
                else:
                    # if Node has NHWC and graph has NCHW layout
                    permutation = PermuteAttrs.get_nchw_to_nhwc_permutation(
                        len(node.layout))

                # Schematic representation of transformation below
                #
                #                                           \            NCHW                              NCHW
                #            NHWC                        --  \            |  permutation       permutation  |
                #   data-->Convolution(example)-->data   --  /            |      |       NCHW      |        |
                #                                           /   data->Permute->data->Convolution->data->Permute->data

                # 1. Insert input Permute
                #    This Permute will permute input from original input layout to operation layout
                edge_attrs = graph.get_edge_data(input.id, node.id)[0]
                graph.remove_edge(input.id, node.id)

                input_permute_op = Permute(graph, {'order': permutation.perm})
                input_permute_data_node = input_permute_op.create_node_with_data(
                    [input], dict(name=node.name + '/Permute_'))

                graph.add_edge(input_permute_data_node.id, node.id,
                               **edge_attrs)

                # 2. Insert output Permute
                #    This Permute will permute output from operation layout to original input layout
                edge_attrs = graph.get_edge_data(node.id, output.id)[0]
                graph.remove_edge(node.id, output.id)

                input_data_node = Op.create_data_node(
                    graph, node, {'shape': output.shape[permutation.perm]},
                    edge_attrs)

                output_permute_op = Permute(graph, {'order': permutation.inv})
                output_permute_op.create_node_with_data([input_data_node],
                                                        dict(name=node.name +
                                                             '/Permute_'),
                                                        data_nodes=output)

                # 3. Add permutations for Node
                #    Here we use permutation mechanism where data nodes takes permutation attribute.
                #    And then we call permute_attrs method that permutes node attributes according to permutations on
                #    data nodes.
                node.in_node()['permutation'] = permutation
                node.out_node()['permutation'] = permutation
                node.permute_attrs.permute_attrs(node)

                node.in_node()['permutation'] = None
                node.out_node()['permutation'] = None
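
The permutation pair computed by PermuteAttrs above can be sanity-checked with plain numpy; a sketch for the common 4-D case (the hard-coded orders below are the standard NHWC<->NCHW permutations, not taken from PermuteAttrs itself):

import numpy as np

nhwc_to_nchw = [0, 3, 1, 2]
nchw_to_nhwc = [0, 2, 3, 1]  # the inverse order

x = np.zeros((1, 224, 224, 3))          # NHWC tensor
y = x.transpose(nhwc_to_nchw)           # now NCHW
print(y.shape)                          # (1, 3, 224, 224)
print(y.transpose(nchw_to_nhwc).shape)  # back to (1, 224, 224, 3)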
Example #21
def parse_specifier(string, graph, layer_node_map):
    pos = string.find(b'(')
    if pos == -1:
        # node name
        input_name = str(string.split(b' ')[0]).strip('b').replace("\'", '').replace('\\n', '')

        if input_name not in layer_node_map:
            node_name = graph.unique_id(prefix=input_name)
            graph.add_node(node_name, parameters=[], op="", kind='op')
            layer_node_map[input_name] = node_name
        else:
            node_name = layer_node_map[input_name]
        return node_name

    spec = string[:pos]
    args = get_args_for_specifier(string[pos:])
    if spec == b'Append':
        nodes = []
        for i in range(len(args)):
            nodes.append(parse_specifier(args[i], graph, layer_node_map))
        layer_name = 'Append_'
        for node in nodes:
            layer_name = layer_name + node + "_"

        if layer_name not in layer_node_map:
            concat_name = graph.unique_id(prefix=layer_name)
            graph.add_node(concat_name,
                           parameters=None,
                           op='concat',
                           kind='op')
            layer_node_map[layer_name] = concat_name
            i = 0
            Node(graph, concat_name).add_sequence_of_ports('in', range(len(nodes)))
            for node in nodes:
                out_port = len(Node(graph, node).out_nodes())
                Node(graph, node).add_output_port(out_port)
                graph.create_edge(Node(graph, node), Node(graph, concat_name), out_port, i, create_edge_attrs(node, concat_name, node, i, out_port))
                i = i + 1
        else:
            concat_name = layer_node_map[layer_name]
        return concat_name
    elif spec == b'Offset':
        node = parse_specifier(args[0], graph, layer_node_map)
        t = int(args[1])
        if len(args) > 2:
            raise Error("ModelOptimizer supports only 2 arguments for Offset")
        layer_name = 'Offset_' + node + '_'
        if t < 0:
            layer_name = layer_name + '_' + str(-t)
        else:
            layer_name = layer_name + str(t)

        if layer_name not in layer_node_map:
            memory_name = graph.unique_id(prefix=layer_name)
            layer_node_map[layer_name] = memory_name
            memory_name_2 = memory_name + '_out'
            graph.add_node(memory_name,
                           parameters=dict(t=t, pair_name=memory_name_2, has_default=False),
                           op='MemoryOffset',
                           kind='op')
            out_port = len(Node(graph, node).out_nodes())
            in_port = len(Node(graph, memory_name).in_nodes())
            Node(graph, memory_name).add_input_port(in_port)
            Node(graph, node).add_output_port(out_port, skip_if_exist=True)
            graph.create_edge(Node(graph, node), Node(graph, memory_name), out_port, in_port, create_edge_attrs(node, memory_name, node, in_port, out_port))
        else:
            memory_name = layer_node_map[layer_name]
        return memory_name
    elif spec == b'Sum':
        nodes = []
        for i in range(len(args)):
            nodes.append(parse_specifier(args[i], graph, layer_node_map))

        layer_name = 'Sum_'
        for node in nodes:
            layer_name = layer_name + node + "_"

        if layer_name not in layer_node_map:
            sum_name = graph.unique_id(prefix=layer_name)
            graph.add_node(sum_name, parameters=None, op='Add', kind='op')
            layer_node_map[layer_name] = sum_name
        else:
            sum_name = layer_node_map[layer_name]

        for i, node in enumerate(nodes):
            out_port = len(Node(graph, node).out_nodes())
            Node(graph, node).add_output_port(out_port, skip_if_exist=True)
            Node(graph, sum_name).add_input_port(i)
            graph.add_edge(node, sum_name, **create_edge_attrs(node, sum_name, node, i))

        return sum_name
    elif spec == b'IfDefined':
        node_id = parse_specifier(args[0], graph, layer_node_map)
        node = Node(graph, node_id)
        if node.op == 'MemoryOffset':
            node['parameters']['has_default'] = True
        return node_id
    elif spec == b'ReplaceIndex':
        node = parse_specifier(args[0], graph, layer_node_map)
        return node
    elif spec == b'Scale':
        node_name = parse_specifier(args[1], graph, layer_node_map)
        scale_value = float(args[0])
        layer_name = '{}/Mul/{}'.format(node_name, scale_value)

        if layer_name not in layer_node_map:
            scale_name = graph.unique_id(prefix=layer_name)
            scale_node = Mul(graph, {'name': scale_name}).create_node()

            layer_node_map[layer_name] = scale_name

            scale_const_name = 'Const_{}'.format(scale_value)
            const_node = Const(graph, {'name': scale_const_name, 'value': float_array([scale_value])}).create_node()

            node = Node(graph, node_name)
            graph.create_edge(const_node, scale_node, 0, 0, create_edge_attrs(const_node.id, scale_node.id, const_node.id))
            out_port = len(node.out_nodes())
            graph.create_edge(node, scale_node, out_port, 1, create_edge_attrs(node_name, scale_node.id, node_name, 1, out_port))
        else:
            scale_name = layer_node_map[layer_name]

        return scale_name
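
parse_specifier recurses into nested descriptors such as Append(Offset(input, -1), input, Offset(input, 1)), so splitting the top-level arguments must respect parenthesis nesting, which is presumably what get_args_for_specifier does. A self-contained sketch of such a splitter (a hypothetical helper, not the actual implementation):

def split_top_level_args(s: bytes) -> list:
    # Split b'(a, b(c, d), e)' into [b'a', b'b(c, d)', b'e'].
    assert s.startswith(b'(') and s.endswith(b')')
    args, depth, start = [], 0, 1
    for i in range(1, len(s) - 1):
        ch = s[i:i + 1]
        if ch == b'(':
            depth += 1
        elif ch == b')':
            depth -= 1
        elif ch == b',' and depth == 0:
            args.append(s[start:i].strip())
            start = i + 1
    args.append(s[start:-1].strip())
    return args

print(split_top_level_args(b'(Offset(input, -1), input, Offset(input, 1))'))
# [b'Offset(input, -1)', b'input', b'Offset(input, 1)']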
Example #22
 def test_slice_infer_2(self):
     graph = self.build_test_graph()
     node = Node(graph, 'sslice_1')
     node.end_mask = [1, 0, 0, 1]  # 6
     tf_strided_slice_infer(node)
     self.assertTrue(np.array_equal(node.out_node().shape, np.array([1, 35, 35, 2])), 'Wrong output shape detected')
Example #23
def muladd_to_scaleshift_action(graph: Graph, match: dict):
    mul = match['mul']
    add = match['add']
    output = match['output']

    # The pass works correctly only when the Mul node has a single output
    if len(mul.out_port(0).get_destinations()) > 1:
        return

    if mul.soft_get('can_be_scaleshift') is False or add.soft_get(
            'can_be_scaleshift') is False:
        return

    mul_weights_id = get_value_id(mul)
    mul_input_id = get_tensor_id(mul)
    add_weights_id = get_value_id(add)

    if mul_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no weights".format(
            mul.name))
        return
    if mul_input_id is None:
        log.debug("Mul->Add to ScaleShift: Mul {} has no input".format(
            mul.name))
        return
    if add_weights_id is None:
        log.debug("Mul->Add to ScaleShift: Add {} has no weights".format(
            add.name))
        return

    input = mul.in_node(mul_input_id)
    weights = mul.in_node(mul_weights_id)
    bias = add.in_node(add_weights_id)

    # Transform values
    weights.value = np.squeeze(weights.value)
    weights.shape = np.array(weights.value.shape, dtype=np.int64)

    bias.value = np.squeeze(bias.value)
    bias.shape = np.array(bias.value.shape, dtype=np.int64)

    # Broadcast weights if they are scalar
    if weights.value.ndim == 0 and bias.value.ndim == 1:
        weights.value = np.full(bias.shape, weights.value.item())
        weights.shape = np.array(weights.value.shape, dtype=np.int64)

    if bias.shape != weights.shape:
        log.warning('Mul->Add to ScaleShift conversion stopped {} != {}'.format(
            weights.shape, bias.shape))
        return

    if bias.value.ndim != weights.value.ndim or bias.value.size != weights.value.size:
        log.debug(
            "Skipping Mul->Add to ScaleShift conversion for nodes {}, {} because of different weights "
            "and biases".format(mul.name, add.name))
        return

    if bias.value.size == 1 and weights.value.size == 1:
        log.debug(
            "Skipping Mul->Add to ScaleShift conversion for nodes {}, {}. Will be converted to Power"
            "".format(mul.name, add.name))
        return

    op_name = "ScaleShift"

    log.debug(
        "Fusing Mul->Add to {}. Input nodes: {} and {}, bias.shape = {}, weights.shape = {}"
        "".format(op_name, mul.id, add.id, bias.shape, weights.shape))

    graph.remove_edge(input.node, mul.id)
    graph.remove_edge(weights.node, mul.id)
    graph.remove_edge(bias.node, add.id)
    graph.remove_edge(add.node, output.id)

    op_node = graph.unique_id(mul.name + '/Fused{}_'.format(op_name))

    graph.add_node(
        op_node,
        **add_attrs_props(
            dict(kind='op',
                 precision="FP32",
                 type=op_name,
                 name=op_node,
                 op=op_name,
                 data_type=input.data_type)))
    scsh = Node(graph, op_node)
    scsh.add_input_port(0)
    scsh.add_input_port(1)
    scsh.add_input_port(2)
    scsh.add_output_port(0)

    update_ie_fields(graph.node[op_node])
    graph.add_edges_from([(input.node, op_node, {
        'in': 0
    }), (weights.node, op_node, {
        'in': 1,
        'bin': 'weights'
    }), (bias.node, op_node, {
        'in': 2,
        'bin': 'biases'
    }), (op_node, output.node, {
        'out': 0
    })])

    return
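
The squeeze/broadcast normalization above is easiest to see on concrete values; a small numpy sketch with made-up shapes:

import numpy as np

weights = np.ones((1, 1, 64), dtype=np.float32)  # Mul constant
bias = np.zeros((64,), dtype=np.float32)         # Add constant

weights = np.squeeze(weights)  # (1, 1, 64) -> (64,), now matches the bias
print(weights.shape == bias.shape)  # True

# A scalar weight is expanded to the bias shape before the shapes are compared.
scalar_w = np.float32(2.0)
print(np.full(bias.shape, scalar_w.item()).shape)  # (64,)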
Example #24
 def test_slice_infer_6(self):
     graph = self.build_test_graph2()
     node = Node(graph, 'sslice_1')
     tf_strided_slice_infer(node)
     self.assertTrue(np.array_equal(node.out_node().shape, np.array([4])), 'Wrong output shape detected')
     self.assertTrue(np.array_equal(node.out_node().value, np.array([1, 34, 34, 62])), 'Wrong output value detected')
Example #25
    def replace_pattern(graph, match: dict):
        # Here we find all parts of the TI (condition, inputs/outputs, back edges, body), create the TensorIterator op
        # and perform all the checks needed for the TensorIterator to work
        cond_data = match['condition'].out_node(
            0) if not match['condition'].out_port(0).disconnected() else None
        time_data = match['condition'].out_node(1) if len(
            match['condition'].out_nodes()) >= 2 else None
        name = match['condition'].name

        back_edges = []
        inputs = []
        outputs = []

        if cond_data is not None:
            for node in cond_data.out_nodes():
                if node['kind'] == 'op' and node[
                        'op'] == 'TensorIteratorBackEdge':
                    back_edges.append(node.id)
                elif node['kind'] == 'op' and node[
                        'op'] == 'TensorIteratorInput':
                    inputs.append(node.id)
                elif node['kind'] == 'op' and node[
                        'op'] == 'TensorIteratorOutput':
                    outputs.append(node.id)

        if time_data is not None:
            for node in time_data.out_nodes():
                if node['kind'] == 'op' and node['op'] == 'TensorIteratorInput':
                    inputs.append(node.id)
                elif node['kind'] == 'op' and node[
                        'op'] == 'TensorIteratorOutput':
                    outputs.append(node.id)
                else:
                    # something goes wrong here
                    assert False
        condition = match['condition']
        tensor_sequence_length = condition.in_node(0)

        nodes_to_remove = [
            n.id
            for n in (condition, cond_data, time_data, tensor_sequence_length)
            if n is not None
        ]
        graph.remove_nodes_from(nodes_to_remove)

        body_nodes, extra_inputs = get_body(graph, inputs, outputs)

        if cond_data is not None:
            body_nodes = list(set(body_nodes) - set([cond_data]))

        inputs += extra_inputs

        assert all([node in graph.nodes() for node in body_nodes])

        inputs = [Node(graph, node) for node in inputs]
        outputs = [Node(graph, node) for node in outputs]
        back_edges = [Node(graph, node) for node in back_edges]

        external_inputs = [{
            'external_data_id':
            node.in_node(1 if node.has_valid('axis') else 0),
            'internal_data_id':
            node.out_node(0),
            'axis':
            node.axis,
            'start':
            node.start,
            'end':
            node.end,
            'stride':
            node.stride,
            'part_size':
            node.part_size
        } for node in inputs]

        external_outputs = [{
            'external_data_id':
            node.out_node(0),
            'internal_data_id':
            node.in_node(1 if node.has_valid('axis') else 0),
            'axis':
            node.axis,
            'start':
            node.start,
            'end':
            node.end,
            'stride':
            node.stride,
            'part_size':
            node.part_size
        } for node in outputs]

        back_edges_data = [{
            'from_data_id': node.in_node(1),
            'to_data_id': node.out_node(0),
            'init_data_id': node.in_node(0),
        } for node in back_edges]

        body = Graph(name='body')
        body.graph = graph.graph
        body.add_nodes_from([(node, graph.node[node]) for node in body_nodes])
        body.add_edges_from([
            (u, v, k, d) for u, v, k, d in graph.edges(data=True, keys=True)
            if u in body_nodes and v in body_nodes
        ])

        graph.remove_nodes_from(body_nodes + [match['condition'].id] +
                                [inp.id for inp in inputs] +
                                [out.id for out in outputs])
        internal_id_count = 0
        real_back_edges = []
        for edge in back_edges_data:
            assert edge['from_data_id'].id in body.nodes()
            assert edge['to_data_id'].id in body.nodes()
            assert edge['init_data_id'].id in body.nodes()
            edge['from_data_id'] = Node(body, edge['from_data_id'].id)
            edge['to_data_id'] = Node(body, edge['to_data_id'].id)
            edge['init_data_id'] = Node(body, edge['init_data_id'].id)
            add_opoutput(body, edge['from_data_id'].id, 0, False)

            # Assign/reuse ids for the back-edge start; it comes from from_data_id
            assert len(edge['from_data_id'].in_nodes()) == 1
            # layer id
            if not edge['from_data_id'].in_node().has_valid(
                    'internal_layer_id'):
                edge['from_data_id'].in_node(
                )['internal_layer_id'] = internal_id_count
                internal_id_count += 1
            edge['from_layer'] = edge['from_data_id'].in_node(
            )['internal_layer_id']

            # port id
            if 'internal_port_id' not in edge['from_data_id'].in_edge():
                edge['from_data_id'].in_edge(
                )['internal_port_id'] = internal_id_count
                internal_id_count += 1
            edge['from_port'] = edge['from_data_id'].in_edge(
            )['internal_port_id']

            # Look at all consumers for a data that ends a back-edge
            # For each such consumer, there will be a separate back-edge (and input)
            current_real_back_edges = []
            for _, consumer, key, edge_attrs in body.out_edges(
                    edge['to_data_id'].id, data=True, keys=True):

                real_edge = {}
                real_edge.update(
                    edge)  # all real back_edges have the same back-edge start

                consumer = Node(body, consumer)

                if real_edge['to_data_id'].in_node().has_valid(
                        'internal_layer_id'):
                    assert False
                    real_edge['to_data_id'].out_node()['internal_layer_id'] = \
                        real_edge['to_data_id'].in_node().internal_layer_id
                elif not consumer.has_valid('internal_layer_id'):
                    consumer['internal_layer_id'] = internal_id_count
                    internal_id_count += 1
                real_edge['to_layer'] = consumer['internal_layer_id']

                assert 'internal_port_id' not in edge_attrs
                assert len(real_edge['init_data_id'].out_edges()) == 1
                assert 'internal_port_id' not in real_edge['init_data_id'].out_edge()
                edge_attrs['internal_port_id'] = internal_id_count
                internal_id_count += 1
                real_edge['to_port'] = edge_attrs['internal_port_id']
                real_edge['consumer'] = consumer
                real_edge['consumer_key'] = key

                real_edge['attrs'] = deepcopy(edge_attrs)
                current_real_back_edges.append(real_edge)

            # connect initial data node with each consumer providing actual edge attributes
            body.add_edges_from([
                (real_edge['init_data_id'].id, real_edge['consumer'].id,
                 real_edge['consumer_key'], real_edge['attrs'])
                for real_edge in current_real_back_edges
            ])

            body.remove_nodes_from(
                [edge['to_data_id'].id, edge['to_data_id'].in_node().id])
            real_back_edges += current_real_back_edges

        real_external_inputs = []

        for ext_inp in external_inputs:
            assert ext_inp['external_data_id'].id not in body.nodes()
            assert ext_inp['internal_data_id'].id in body.nodes()
            ext_inp['internal_data_id'] = Node(body,
                                               ext_inp['internal_data_id'].id)

            if ext_inp['axis'] is not None:
                # Insert squeezing resize at input port that has partitioning
                shape = ext_inp['internal_data_id'].shape.copy()
                assert not ext_inp['internal_data_id'].has_valid('value')
                new_input_data = Op._create_data_node(
                    body,
                    ext_inp['internal_data_id'].name + '/UnsqueezedInput',
                    dict(shape=shape_insert(shape, ext_inp['axis'], 1)))

                reshape_op = Squeeze(
                    body,
                    dict(name=ext_inp['internal_data_id'].name +
                         '/InputSqueeze'))
                reshape_dim_data = Const(
                    body, {
                        'name':
                        ext_inp['internal_data_id'].name + '/ReshapeDim',
                        'value': ext_inp['axis']
                    }).create_node_with_data()
                reshape_op.create_node_with_data(
                    [new_input_data, reshape_dim_data],
                    data_nodes=[ext_inp['internal_data_id']])
                ext_inp['internal_data_id'] = new_input_data

            ext_inp['internal_data_id']['is_input'] = True
            assert len(ext_inp['internal_data_id'].in_nodes()) == 0
            ext_inp['external_port_id'] = internal_id_count
            internal_id_count += 1
            for _, consumer, edge_attrs in body.out_edges(
                    ext_inp['internal_data_id'].id, data=True):
                real_ext_inp = {}
                real_ext_inp.update(ext_inp)
                consumer = Node(body, consumer)
                if not consumer.has_valid('internal_layer_id'):
                    consumer['internal_layer_id'] = internal_id_count
                    internal_id_count += 1
                if 'internal_port_id' not in edge_attrs:
                    edge_attrs['internal_port_id'] = internal_id_count
                    internal_id_count += 1
                real_ext_inp['internal_layer_id'] = consumer[
                    'internal_layer_id']
                real_ext_inp['internal_port_id'] = edge_attrs[
                    'internal_port_id']
                real_external_inputs.append(real_ext_inp)

        for ext_out in external_outputs:
            assert ext_out['external_data_id'].id not in body.nodes()
            assert ext_out['internal_data_id'].id in body.nodes()
            ext_out['internal_data_id'] = Node(body,
                                               ext_out['internal_data_id'].id)

            if ext_out['axis'] is not None:
                # Insert unsqueezing resize at output port that has partitioning
                reshape_op = Unsqueeze(
                    body,
                    dict(name=ext_out['internal_data_id'].name +
                         '/OutputUnsqueeze'))
                reshape_dim_data = Const(
                    body, {
                        'name':
                        ext_out['internal_data_id'].name + '/ReshapeDim',
                        'value': ext_out['axis']
                    }).create_node_with_data()
                ext_out['internal_data_id'] = reshape_op.create_node_with_data(
                    [ext_out['internal_data_id'], reshape_dim_data])

            # TODO: add here working with simple outputs

            if not any([
                    out_node.soft_get('op', None) == 'Result'
                    for out_node in ext_out['internal_data_id'].out_nodes()
            ]):
                add_opoutput(body, ext_out['internal_data_id'].id, 0, False)

            # assert len(ext_out['internal_data_id'].out_nodes()) == 0
            assert len(ext_out['internal_data_id'].in_nodes()) == 1
            if 'internal_layer_id' not in ext_out['internal_data_id'].in_node():
                ext_out['internal_data_id'].in_node()['internal_layer_id'] = internal_id_count
                internal_id_count += 1
            if 'internal_port_id' not in ext_out['internal_data_id'].in_edge():
                ext_out['internal_data_id'].in_edge()['internal_port_id'] = internal_id_count
                internal_id_count += 1
            ext_out['internal_layer_id'] = ext_out['internal_data_id'].in_node(
            )['internal_layer_id']
            ext_out['internal_port_id'] = ext_out['internal_data_id'].in_edge(
            )['internal_port_id']
            ext_out['external_port_id'] = internal_id_count
            internal_id_count += 1

        # create TensorIterator layer with pre-computed components
        ti_op = TensorIterator(
            graph, {
                'name':
                name + '/TensorIterator',
                'body':
                body,
                'in_ports_count':
                len(external_inputs),
                'out_ports_count':
                len(external_outputs),
                'input_port_map': [{
                    field: external_input[field]
                    for field in [
                        'external_port_id', 'internal_layer_id',
                        'internal_port_id', 'axis', 'stride', 'part_size',
                        'start', 'end'
                    ]
                } for external_input in real_external_inputs],
                'output_port_map': [{
                    field: external_output[field]
                    for field in [
                        'external_port_id', 'internal_layer_id',
                        'internal_port_id', 'axis', 'stride', 'part_size',
                        'start', 'end'
                    ]
                } for external_output in external_outputs],
                'back_edges': [{
                    field: edge[field]
                    for field in
                    ['from_layer', 'from_port', 'to_layer', 'to_port']
                } for edge in real_back_edges],
            })

        ti_outs = ti_op.create_node_with_data(
            inputs=[inp['external_data_id'] for inp in external_inputs],
            edge_attrs=[{
                'external_port_id': inp['external_port_id']
            } for inp in external_inputs],
            data_nodes=[out['external_data_id'] for out in external_outputs])

        if not isinstance(ti_outs, list):
            ti_outs = [ti_outs]

        for i, out in enumerate(ti_outs):
            out.in_edge(
            )['external_port_id'] = external_outputs[i]['external_port_id']

        ti = ti_outs[0].in_node()
        TensorIterator.cover_body_input_data_nodes_with_parameter_ops(ti)
        TensorIterator.cover_body_constant_data_nodes_with_const_ops(ti)
        TensorIterator.normalize_internal_ids(ti)
Example #26
    def extract(cls, loop_node):
        Loop.update_node_stat(loop_node, {})

        body_graph_proto = onnx_attr(loop_node, 'body', 'g', None)
        main_graph = loop_node.graph

        # create a Graph object for the body and take graph attributes from the main graph
        body_graph = Graph()
        main_graph_attrs_copy = copy.deepcopy(main_graph.graph)
        del main_graph_attrs_copy['tensor_mapping']
        body_graph.graph.update(main_graph_attrs_copy)
        loop_node['body'] = body_graph

        # maps a tensor name to the node that produces it and the output port: str -> (node_id, node_port)
        data_nodes_map = {}
        body_graph.graph[
            'tensor_mapping'] = data_nodes_map  # save mapping for possible Loop inside the Loop

        body_parameters = add_initializers_and_inputs_to_graph(
            body_graph, body_graph_proto, data_nodes_map)

        external_edges = []  # ((src_node, src_out_port), dest_body_parameter_node)
        additional_params = {}  # (src_node, src_out_port) -> parameter_node (for manually added Parameters)
        # Go through all nodes in the original model order because data nodes are defined on-the-fly and order matters
        for pb_node in body_graph_proto.node:
            # create an NX node
            id = body_graph.unique_id(node_id(pb_node))
            body_graph.add_node(id, pb=pb_node, kind='op')

            # add incoming edges based on data_nodes_map
            for dst_port, inp in enumerate(pb_node.input):
                # should add edge inp --> id
                if inp not in data_nodes_map:
                    if inp == '':
                        # input is omitted; most likely it corresponds to an optional input for an operator
                        continue
                    elif inp in main_graph.graph['tensor_mapping']:
                        log.debug(
                            'The edge between outer and inner graphs detected: {} -> {}'
                            .format(inp, id))
                        if main_graph.graph['tensor_mapping'][
                                inp] not in additional_params:
                            # create new Parameter body node and connect the body node with the outer graph using it
                            param_id = str(inp)
                            body_graph.add_node(param_id,
                                                kind='op',
                                                op='Parameter',
                                                name=param_id,
                                                pb=None,
                                                shape=None)
                            parameter_node = Node(body_graph, param_id)
                            # need to manually update necessary attrs for the node because extractor will not be called
                            # for it because the node does not have .pb attribute
                            Parameter.update_node_stat(parameter_node, {})
                            external_edges.append(
                                (main_graph.graph['tensor_mapping'][inp],
                                 parameter_node))
                            src_id, src_port = param_id, 0
                            additional_params[main_graph.graph[
                                'tensor_mapping'][inp]] = parameter_node
                        else:
                            src_id, src_port = additional_params[
                                main_graph.graph['tensor_mapping'][inp]].id, 0
                    else:
                        raise Error(
                            'Reference to "{}" is not satisfied. A node refers to a non-existing data tensor. The '
                            'ONNX model is not consistent. Protobuf fragment: {}',
                            inp, pb_node)
                else:
                    src_id, src_port = data_nodes_map[inp]

                assert (body_graph.has_node(src_id))
                edge_attrs = {
                    'out': src_port,
                    'in': dst_port,
                    'name': inp,
                    'fw_tensor_debug_info': [(inp, inp)],
                    'in_attrs': ['in', 'name'],
                    'out_attrs': ['out', 'name'],
                    'data_attrs': ['fw_tensor_debug_info']
                }
                body_graph.add_edge(src_id, id, **edge_attrs)

            # add outgoing edges to data_nodes_map
            for src_port, out in enumerate(pb_node.output):
                if out in data_nodes_map:
                    log.debug("Detected reuse of blob {}.".format(out))
                data_nodes_map[out] = (id, src_port)

        body_results = []
        for output in body_graph_proto.output:
            tensor_name = str(output.name)
            node_name, output_port = data_nodes_map[tensor_name]
            assert body_graph.has_node(
                node_name
            ), 'The body graph does not contain output with name "{}"'.format(
                node_name)
            body_results.append(
                Node(body_graph,
                     add_opoutput(body_graph, node_name, output_port, False)))

        # add 'internal_layer_id' attribute which is a must have attribute for the loop body node
        for idx, body_node in enumerate(body_graph.get_op_nodes()):
            body_node['internal_layer_id'] = idx

        loop_carried_dependencies_count = len(body_graph_proto.input) - 2
        scan_outputs_count = len(
            body_graph_proto.output) - 1 - loop_carried_dependencies_count

        # Loop inputs:
        #   0 - trip count
        #   1 - execution condition
        #   2 .. - loop carried dependencies

        # Loop outputs:
        #   0 .. loop_carried_dependencies_count - 1 - loop carried dependencies
        #   loop_carried_dependencies_count .. - scan outputs

        # Body inputs:
        #   0 - iteration number
        #   1 - execution condition
        #   2 .. - loop carried dependencies

        # Body outputs:
        #   0 - execution condition
        #   1 .. loop_carried_dependencies_count - loop carried dependencies
        #   loop_carried_dependencies_count + 1 .. - scan outputs

        body_graph.stage = 'front'
        # some of the inputs/outputs may not be connected but the normalization transformation will take care of it
        # connect Loop body nodes with external input edges
        next_loop_input_port_idx = sorted(loop_node.in_edges().keys())[-1] + 1
        for (src_node, src_port), body_node in external_edges:
            main_graph.add_edge(
                src_node, loop_node.id, **{
                    'out': src_port,
                    'in': next_loop_input_port_idx,
                    'name': src_node,
                    'fw_tensor_debug_info': [(src_node, src_node)],
                    'in_attrs': ['in', 'name'],
                    'out_attrs': ['out', 'name'],
                    'data_attrs': ['fw_tensor_debug_info']
                })
            connect_body_input(loop_node, next_loop_input_port_idx, body_node)
            next_loop_input_port_idx += 1

        # mark current iteration input Parameter node
        Loop.mark_current_iteration_parameter_node(loop_node,
                                                   body_parameters[0])

        # connect initial value for "execution condition" input of the loop
        connect_body_input(loop_node, 1, body_parameters[1])
        # add back edge with "execution condition"
        Loop.add_back_edge(loop_node, body_parameters[1], body_results[0])
        # mark "execution condition" Result node
        Loop.mark_execution_condition_result_node(loop_node, body_results[0])

        # connect initial value for "loop carried" dependencies variables
        for idx in range(loop_carried_dependencies_count):
            connect_body_input(loop_node, idx + 2, body_parameters[idx + 2])
        # add back edge for "loop carried" dependencies variables
        for idx in range(loop_carried_dependencies_count):
            Loop.add_back_edge(loop_node, body_parameters[idx + 2],
                               body_results[idx + 1])
        # connect final value for "loop carried" dependencies variables
        for idx in range(loop_carried_dependencies_count):
            connect_body_output(loop_node, idx, body_results[idx + 1])

        # connect "scan outputs" and mark axis for concatenation
        for idx in range(loop_carried_dependencies_count,
                         loop_carried_dependencies_count + scan_outputs_count):
            connect_body_output(loop_node, idx, body_results[idx + 1], axis=0)

        # run function to parse body nodes attributes similar to the main graph
        extract_node_attrs(
            body_graph, lambda node: onnx_op_extractor(
                node, check_for_duplicates(onnx_op_extractors)))
        return cls.enabled
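
The loop-carried/scan-output bookkeeping above reduces to simple arithmetic over the body signature; a sketch with made-up counts:

# Body inputs:  iteration number, condition, N carried dependencies.
# Body outputs: condition, N carried dependencies, M scan outputs.
body_input_count = 5   # 2 fixed inputs + 3 carried dependencies
body_output_count = 6  # 1 fixed output + 3 carried dependencies + 2 scan outputs

loop_carried = body_input_count - 2                  # 3
scan_outputs = body_output_count - 1 - loop_carried  # 2
print(loop_carried, scan_outputs)  # 3 2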
Example #27
def update_inputs(graph, inputs: list, node_name: str):
    node = Node(graph, node_name)
    if node.has_valid('kind') and node['kind'] == 'op' and node[
            'op'] == 'TensorIteratorInput':
        if node_name not in inputs:
            inputs.append(node_name)
Example #28
    def test_switch_infer_with_condition(self):
        nodes = [('tensor', {
            'value': np.zeros((3, 3)),
            'kind': 'data',
            'executable': True,
            'shape': np.array([3, 3])
        }), ('pred_id', {
            'value': True,
            'kind': 'data',
            'executable': True
        }), ('switch', {
            'type': 'Switch',
            'kind': 'op',
            'op': 'Switch'
        }),
                 ('switch_data_0', {
                     'value': None,
                     'kind': 'data',
                     'executable': True
                 }),
                 ('switch_data_1', {
                     'value': None,
                     'kind': 'data',
                     'executable': True
                 })]
        edges = [('tensor', 'switch', {
            'in': 0
        }), ('pred_id', 'switch', {
            'in': 1
        }), ('switch', 'switch_data_0', {
            'out': 0
        }), ('switch', 'switch_data_1', {
            'out': 1
        })]
        graph = build_graph_with_attrs(nodes_with_attrs=nodes,
                                       edges_with_attrs=edges)

        # We should propagate shapes and values
        graph_ref = build_graph_with_attrs(nodes_with_attrs=nodes,
                                           edges_with_attrs=edges,
                                           update_nodes_attributes=[
                                               ('switch_data_0', {
                                                   'shape': np.array([3, 3]),
                                                   'value': np.zeros((3, 3))
                                               }),
                                               ('switch_data_1', {
                                                   'shape': np.array([3, 3]),
                                                   'value': np.zeros((3, 3))
                                               })
                                           ])

        tested_class = Switch(graph=graph, attrs={})

        node = Node(graph, 'switch')
        tested_class.infer(node)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'switch_data_0',
                                      check_op_attrs=True)
        self.assertTrue(flag, resp)
Example #29
 def _create_node(op_name: str):
     pb = onnx.helper.make_node(op_name, ["X"], ["Y"])
     graph = build_graph({'node_0': {'pb': pb}}, [])
     return Node(graph, 'node_0')
Example #30
def merge_nodes(graph: nx.MultiDiGraph,
                nodes_to_merge_names: list,
                inputs_desc: list = None,
                outputs_desc: list = None):
    """
    Merges nodes specified in the list 'nodes_to_merge_names' into one mega-node, creating new edges between the
    mega-node and the input/output nodes of the mega-node. The added edges contain the names of the input/output nodes,
    which will be used for generation of placeholders and will be saved to the IR XML so the IE plug-in knows how to
    map input/output data for the layer. The function also adds the protobufs of the sub-graph nodes and of the 'Const'
    ops consumed by nodes in the sub-graph to the mega-node's 'pbs' attribute.
    :param graph: the graph object to operate on.
    :param nodes_to_merge_names: list of nodes names that should be merged into a single node.
    :param inputs_desc: optional list describing input nodes order.
    :param outputs_desc: optional list describing output nodes order.
    """
    if not is_connected_component(graph, nodes_to_merge_names):
        log.warning(
            "The following nodes do not form connected sub-graph: {}".format(
                nodes_to_merge_names))
        dump_graph_for_graphviz(graph, nodes_to_dump=nodes_to_merge_names)

    new_node_name = unique_id(graph, "TFSubgraphCall_")
    log.info("Create new node with name '{}' for nodes '{}'".format(
        new_node_name, ', '.join(nodes_to_merge_names)))
    graph.add_node(new_node_name)
    new_node_attrs = graph.node[new_node_name]

    new_node_attrs['name'] = new_node_name
    set_tf_custom_call_node_attrs(new_node_attrs)
    new_node = Node(graph, new_node_name)

    added_input_tensors_names = set()  # tensor names that were added as inputs to the sub-graph
    added_new_node_output_tensors = dict()  # key - tensor name, value - out port

    for node_name in nodes_to_merge_names:
        node = Node(graph, node_name)
        add_node_pb_if_not_yet_added(node, new_node)
        for in_node_name, edge_attrs in get_inputs(graph, node_name):
            in_node = Node(graph, in_node_name)

            # internal edges between nodes of the sub-graph
            if in_node_name in nodes_to_merge_names:
                add_node_pb_if_not_yet_added(in_node, new_node)
                continue

            # edge outside of sub-graph into sub-graph
            if in_node_name not in nodes_to_merge_names:
                # we cannot use the 'in_node_name' as a protobuf operation name here
                # because the 'in_node_name' could be a sub-graph matched before.
                input_tensor_name = node.pb.input[edge_attrs['in']]
                if input_tensor_name not in added_input_tensors_names:
                    graph.add_edge(
                        in_node_name, new_node_name,
                        **merge_edge_props(
                            {
                                'in':
                                find_input_port(new_node, inputs_desc,
                                                node_name, edge_attrs['in']),
                                'out':
                                edge_attrs['out'],
                                'internal_input_node_name':
                                input_tensor_name,
                                'original_dst_node_name':
                                node_name,
                                'original_dst_port':
                                edge_attrs['in'],
                                'in_attrs': [
                                    'in', 'internal_input_node_name',
                                    'original_dst_node_name',
                                    'original_dst_port', 'placeholder_name'
                                ],
                                'out_attrs': ['out']
                            }, edge_attrs))
                    log.debug(
                        "Creating edge from outside of sub-graph to inside sub-graph: {} -> {}"
                        .format(in_node_name, new_node_name))
                    added_input_tensors_names.add(input_tensor_name)

        # edge from inside sub-graph to outside sub-graph
        for out_node_name, edge_attrs in get_outputs(graph, node_name):
            if out_node_name not in nodes_to_merge_names:
                log.debug(
                    "Creating edge from inside of sub-graph to outside sub-graph: {} -> {}"
                    .format(new_node_name, out_node_name))
                out_name = internal_output_name_for_node(
                    node_name, edge_attrs['out'])
                if out_name not in added_new_node_output_tensors.keys():
                    added_new_node_output_tensors[out_name] = find_output_port(
                        new_node, outputs_desc, node_name, edge_attrs['out'])
                graph.add_edge(
                    new_node_name, out_node_name,
                    **merge_edge_props(
                        {
                            'in': edge_attrs['in'],
                            'out': added_new_node_output_tensors[out_name],
                            'internal_output_node_name': out_name,
                            'in_attrs': ['in', 'internal_input_node_name'],
                            'out_attrs': ['out', 'internal_output_node_name']
                        }, edge_attrs))
        new_node['output_tensors_names'] = [
            val for val in
            {v: k
             for k, v in added_new_node_output_tensors.items()}.values()
        ]

    # add nodes using the same order as in initial GraphDef so we can dump them to IR in "correct" order
    new_node['nodes_order'] = [
        node for node in graph.graph['initial_nodes_order']
        if node in new_node['pbs'].keys()
    ]

    for n in nodes_to_merge_names:
        if graph.has_node(
                n):  # check if not deleted by another (similar) pattern
            graph.remove_node(n)
    return Node(graph, new_node_name)
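
The 'output_tensors_names' computation above relies on a dict inversion to keep exactly one tensor name per output port; a standalone illustration (sample names and ports are made up):

added = {'conv/out:0': 0, 'conv/out_dup:0': 0, 'relu/out:0': 1}

# Inverting keeps the last name seen for each port, so duplicates collapse.
names = [val for val in {v: k for k, v in added.items()}.values()]
print(names)  # ['conv/out_dup:0', 'relu/out:0']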