Example #1
def protobuf2nx(pb):
    '''Convert a proto message with an ONNX model to the equivalent NX representation.
    All nodes and edges are restored here because the ONNX model has an op/data representation,
    which means that nodes are connected via tensor names. Names of tensors are defined
    on demand in nodes, so the code here is similar to the Caffe front-end. '''
    # graph = create_graph_with_nodes(pb.graph.node, get_id=node_id, get_attrs=protobuf_attrs)
    # convert initializers to a NX graph for easier control of model consistency and to use it as a dictionary later
    initializers = create_graph_with_nodes(pb.graph.initializer,
                                           get_id=lambda pb: pb.name,
                                           get_attrs=protobuf_attrs)

    graph = Graph()

    # maps a tensor name to the node that produced it and the node port: str -> (node_id, node_port)
    data_nodes_map = {}

    # first go through all inputs and separate constants from placeholders
    for inp in pb.graph.input:
        name = str(inp.name)
        if graph.has_node(name):
            raise Error(
                'Name {} of input node already exists, input names are duplicated.',
                name)
        elif initializers.has_node(name):
            # this is a constant
            graph.add_node(name,
                           kind='op',
                           op='Const',
                           pb=inp,
                           pb_init=initializers.node[name]['pb'])
        else:
            # this is a placeholder
            graph.add_node(name, kind='op', op='Parameter', pb=inp)
        # add to the tensors map
        assert name not in data_nodes_map, 'Inconsistency between data_nodes_map and graph.nodes'
        data_nodes_map[name] = (name, 0)

    # go over all initializers and make sure that all of them are added to the graph
    for initializer in initializers.nodes():
        initializer_id = 'onnx_initializer_node_' + initializer
        if not graph.has_node(initializer_id):
            graph.add_node(initializer_id,
                           kind='op',
                           op='Const',
                           pb=initializers.node[initializer]['pb'],
                           pb_init=initializers.node[initializer]['pb'])
            data_nodes_map[initializer] = (initializer_id, 0)

    # Go through all nodes in the original model order (because data nodes are defined on-the-fly and order is
    # important)
    for node in pb.graph.node:
        # create an NX node
        id = graph.unique_id(node_id(node))
        graph.add_node(id, pb=node, kind='op')

        # add incoming edges based on data_nodes_map
        for dst_port, inp in enumerate(node.input):
            # should add edge inp --> id
            if inp not in data_nodes_map:
                if inp == '':
                    # input is omitted; most likely it corresponds to an optional input for an operator
                    continue
                else:
                    raise Error(
                        'Reference to {} is not satisfied. A node refers to a non-existing data tensor. The ONNX '
                        'model is not consistent. Protobuf fragment: {}', inp, node)
            src_id, src_port = data_nodes_map[inp]

            assert (graph.has_node(src_id))
            edge_attrs = {
                'out': src_port,
                'in': dst_port,
                'name': inp,
                'fw_tensor_debug_info': [(inp, inp)],
                'in_attrs': ['in', 'name'],
                'out_attrs': ['out', 'name'],
                'data_attrs': ['fw_tensor_debug_info']
            }
            graph.add_edge(src_id, id, **edge_attrs)

        # add outgoing edges to data_nodes_map
        for src_port, out in enumerate(node.output):
            if out in data_nodes_map:
                log.debug("Detected reuse of blob {}.".format(out))
            data_nodes_map[out] = (id, src_port)

    return graph
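A minimal usage sketch for the function above (the model path is a placeholder; protobuf2nx and its helpers from the Model Optimizer ONNX front-end are assumed to be importable): load the ONNX protobuf with the onnx package and convert it to the NX graph.

import onnx

# 'model.onnx' is a placeholder path; onnx.load returns a ModelProto whose
# 'graph' field is what protobuf2nx walks above.
model_proto = onnx.load('model.onnx')
nx_graph = protobuf2nx(model_proto)
print('Restored {} nodes'.format(len(nx_graph.nodes())))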
Example #2
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_data_nodes():
            # Get all requested shapes for the current node.
            # This mapping will contain pairs like {shape: [list of consumer nodes]}
            mapping = {}
            for consumer in node.out_nodes():
                edge_attrs = graph.get_edge_data(node.id, consumer.id)[0]
                if 'new_shape' in edge_attrs:
                    if np.array_equal(edge_attrs['new_shape'], node.shape):
                        continue
                    new_shape = tuple(edge_attrs['new_shape'])
                    if new_shape not in mapping:
                        mapping.update({new_shape: [consumer]})
                    else:
                        mapping[new_shape].append(consumer)

            if node.has_valid('value'):
                # Check that the requested shapes are the same.
                # If they differ, duplicate the constant data node for each requested shape.
                for shape_key in mapping.keys():
                    shape = list(shape_key)
                    new_value = np.reshape(node.value, shape)
                    node_copy = Op.create_input_data_node(
                        graph, node.id + '/copy', value=np.array(new_value))
                    for consumer in mapping[shape_key]:
                        edge_attrs = graph.get_edge_data(node.id,
                                                         consumer.id)[0]
                        del edge_attrs['new_shape']

                        # Remove edge from previous data node and connect new data node with its consumer
                        graph.remove_edge(node.id, consumer.id)
                        graph.add_edge(node_copy.id, consumer.id, **edge_attrs)
            else:
                # Insert Reshape layer between data node and consumer
                for shape_key in mapping.keys():
                    shape = list(shape_key)
                    reshape = Reshape(
                        graph, attrs={'name': 'EltwiseReshapeNormalization'})
                    reshape_dim = Const(graph, {
                        'value': shape
                    }).create_node_with_data()
                    reshape_data = reshape.create_node_with_data(
                        inputs=[node, reshape_dim])

                    # Iterate over consumers and reconnect them to Reshape layer output
                    for consumer in mapping[shape_key]:
                        edge_attrs = graph.get_edge_data(node.id,
                                                         consumer.id)[0]
                        del edge_attrs['new_shape']

                        # Reconnect the edge from the original data node to the Reshape output data node
                        graph.remove_edge(node.id, consumer.id)
                        graph.add_edge(reshape_data.id, consumer.id,
                                       **edge_attrs)
Example #3
    def update_custom_replacement_attributes(self, graph: Graph):
        if not self.has('instances'):
            raise Error(
                "No instance(s) is(are) defined for the custom replacement '{}'. "
                .format(self.replacement_id) + refer_to_faq_msg(66))
        if not isinstance(self.instances, dict):
            raise Error(
                "The instance must be a single dictionary for the custom replacement with id '{}'. "
                .format(self.replacement_id) + refer_to_faq_msg(67))

        start_points = self.get_internal_input_nodes(graph)
        end_points = self.get_internal_output_nodes(graph)

        matched_nodes = sub_graph_between_nodes(graph, start_points,
                                                end_points)
        output_tensors = set()
        # key is the input tensor name, value is a list of pairs: (consumer node name regexp, input port)
        input_nodes_mapping = dict()
        for src_node_name, dst_node_name, edge_attrs in graph.edges(data=True):
            dst_node = graph.node[dst_node_name]

            # edge outside sub-graph into sub-graph
            if (src_node_name not in matched_nodes) and (dst_node_name
                                                         in matched_nodes):
                tensor_name = src_node_name + ":" + str(edge_attrs['out'])
                if tensor_name not in input_nodes_mapping:
                    input_nodes_mapping[tensor_name] = list()
                input_nodes_mapping[tensor_name].append(
                    ('^' + dst_node_name + '$', edge_attrs['in']))

            # edge from inside sub-graph to outside sub-graph
            if (src_node_name in matched_nodes) and (dst_node_name
                                                     not in matched_nodes):
                output_tensors.add(
                    ('^' + dst_node['pb'].input[edge_attrs['in']] + '$',
                     edge_attrs['out']))

        for node_name in graph.nodes():
            node = Node(graph, node_name)
            if node_name in matched_nodes and len(
                    node.out_nodes()) == 0 and node['pb'].op != 'Const':
                log.debug(
                    "Node {} doesn't have output edges. Considering it an output".format(
                        node_name))
                output_tensors.add(('^' + node_name + '$', 0))

        if not self.has('inputs'):
            self._replacement_desc['inputs'] = [[{
                'node': desc[0],
                'port': desc[1]
            } for desc in inp] for inp in sorted(input_nodes_mapping.values())]
            log.debug('Updated inputs of sub-graph for instance "{}"'.format(
                self.instances))

        if not self.has('outputs'):
            self._replacement_desc['outputs'] = [{
                'node': node,
                'port': port
            } for node, port in sorted(output_tensors)]
            log.debug('Updated outputs of sub-graph for instance "{}"'.format(
                self.instances))
Example #4
def build_graph_with_edge_attrs(nodes_attrs: dict,
                                edges: list,
                                update_attributes: dict = None):
    """
    Build the Graph with specific nodes and edges.
    :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes.
    :param edges: list of triples (start node name, end node name, dictionary with edge attributes).
    :param update_attributes: optional dictionary which specifies nodes names and their attributes to be updated. The
    key is a node name to update attribute and the value is a dictionary with attribute name and its value.
    :return: generated graph.
    """
    graph = Graph()
    for node_1, node_2, attr in edges:
        if node_1 not in graph.nodes():
            graph.add_node(node_1, **deepcopy(nodes_attrs[node_1]))
        if node_2 not in graph.nodes():
            graph.add_node(node_2, **deepcopy(nodes_attrs[node_2]))
        graph.add_edge(node_1, node_2, **attr)
    if update_attributes is not None:
        for node_name, new_attrs in update_attributes.items():
            assert (node_name in graph.nodes())
            for attr, value in new_attrs.items():
                graph.node[node_name][attr] = value
    return graph
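A minimal usage sketch for this helper (node names and attributes are illustrative, not taken from a real test; the Graph class used above is assumed to be importable): build a tiny op/data graph with explicit edge attributes.

# 'out' is set on op->data edges and 'in' on data->op edges, as the
# Model Optimizer op/data graphs expect.
nodes = {
    'param': {'kind': 'op', 'op': 'Parameter'},
    'param_data': {'kind': 'data', 'shape': None},
    'relu': {'kind': 'op', 'op': 'ReLU'},
}
edges = [
    ('param', 'param_data', {'out': 0}),
    ('param_data', 'relu', {'in': 0}),
]
graph = build_graph_with_edge_attrs(
    nodes, edges,
    update_attributes={'param_data': {'shape': [1, 3, 224, 224]}})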
Example #5
    def replace_pattern(graph: Graph, match: dict):
        node = match['op']

        if node.name == 'iteration_number_out':
            return

        # calculate the length of the context after which the inference state becomes meaningful
        inputs = graph.get_op_nodes(op='Parameter')

        in_nodes = []
        for inp in inputs:
            for ins in inp.out_port(0).get_destinations():
                in_nodes.append(ins.node.name)

        context_len = 1
        try:
            subgraph = invert_sub_graph_between_nodes(
                graph, [node.in_port(0).get_source().node.name], in_nodes)
        except Error:
            return

        for n in subgraph:
            n_node = Node(graph, n)
            if n_node.kind == 'op' and n_node.op == 'Splice':
                context_len += len(n_node.context) - 1

        if context_len == 1:
            return

        in_node_port = node.in_port(0).get_source()
        in_node_shape = node.in_port(0).data.get_shape()
        node.in_port(0).disconnect()

        # add Select before saving state to avoid saving garbage
        select_node = Select(graph, {
            'name': 'select_' + node.name
        }).create_node()
        zero_else = Const(graph, {
            'name': 'zero_else',
            'value': np.zeros(in_node_shape)
        }).create_node()
        select_node.in_port(1).connect(in_node_port)
        select_node.in_port(2).connect(zero_else.out_port(0))

        # check if an appropriate iteration counter already exists
        existing_counters = find_pattern_matches(
            graph,
            nodes=[('mem_in', dict(op='ReadValue')),
                   ('mem_in_data', dict(shape=int64_array([context_len]))),
                   ('crop_mem_in',
                    dict(op='Crop',
                         axis=int64_array([1]),
                         offset=int64_array([1]),
                         dim=int64_array([context_len - 1]))),
                   ('crop_mem_in_data', dict()),
                   ('concat', dict(op='Concat', axis=1)),
                   ('concat_data', dict()), ('const_1', dict(op='Const')),
                   ('const_1_data', dict()), ('mem_out', dict(op='Assign')),
                   ('crop_out',
                    dict(op='Crop',
                         axis=int64_array([1]),
                         offset=int64_array([0]),
                         dim=int64_array([1]))), ('crop_out_data', dict()),
                   ('select', dict(op='Select'))],
            edges=[('mem_in', 'mem_in_data'), ('mem_in_data', 'crop_mem_in'),
                   ('crop_mem_in', 'crop_mem_in_data'),
                   ('crop_mem_in_data', 'concat', {
                       'in': 0
                   }), ('const_1', 'const_1_data'),
                   ('const_1_data', 'concat', {
                       'in': 1
                   }), ('concat', 'concat_data'), ('concat_data', 'mem_out'),
                   ('concat_data', 'crop_out'), ('crop_out', 'crop_out_data'),
                   ('crop_out_data', 'select')])
        counter_match = next(existing_counters, None)
        if counter_match is not None:
            ones = Node(graph, inverse_dict(counter_match)['const_1'])
            input_port = Node(
                graph,
                inverse_dict(counter_match)['crop_out']).out_port(0)
        else:
            init_value_mem_out = create_zero_value_with_batch_from_input(
                in_node_port, context_len, np.int32)
            mem_out = ReadValue(
                graph, {
                    'name': 'iteration_number',
                    'variable_id': 'iteration_' + node.name
                }).create_node()
            mem_out.in_port(0).connect(init_value_mem_out.out_port(0))
            cut_first = Crop(
                graph, {
                    'name': 'cut_first',
                    'axis': int64_array([1]),
                    'offset': int64_array([1]),
                    'dim': int64_array([context_len - 1])
                }).create_node()
            cut_first.in_port(0).connect(mem_out.out_port(0))
            ones = Const(graph, {
                'name': 'ones',
                'value': np.ones([1, 1], dtype=np.int32)
            }).create_node()
            concat = Concat(graph, {
                'name': 'concat_ones',
                'in_ports_count': 2,
                'axis': 1
            }).create_node()
            concat.in_port(0).connect(cut_first.out_port(0))
            concat.in_port(1).connect(ones.out_port(0))
            mem_in = Assign(
                graph, {
                    'name': 'iteration_number_out',
                    'variable_id': 'iteration_' + node.name
                }).create_node()
            mem_in.in_port(0).connect(concat.out_port(0))
            res = Result(graph, {}).create_node()
            mem_in.out_port(0).connect(res.in_port(0))
            cut_last = Crop(
                graph, {
                    'name': 'cut_last',
                    'axis': int64_array([1]),
                    'offset': int64_array([0]),
                    'dim': int64_array([1])
                }).create_node()
            cut_last.in_port(0).connect(concat.out_port(0))
            input_port = cut_last.out_port(0)

        # Check if the data from memory is 1.
        # If it is True, we have correct data and should proceed with saving it to memory;
        # otherwise the context has not been gathered yet, the data is garbage,
        # and the initial state of memory should not be changed.
        cast_in = Equal(graph, {
            'name': input_port.node.name + '/cast_to_bool'
        }).create_node()
        cast_in.in_port(0).connect(ones.out_port(0))
        cast_in.in_port(1).connect(input_port)
        select_node.in_port(0).connect(cast_in.out_port(0))
        select_node.out_port(0).connect(node.in_port(0))
        select_node.out_port(0).data.set_shape(in_node_shape)
Example #6
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['reduce']

        if node.out_port(0).data.get_value() is not None:
            # We leave Reduce* operations located in a constant sub-graph as-is
            # to keep the model reshape-able with the --keep_shape_ops cli key
            return

        reduce_type = node.type
        if reduce_type not in self.pool_method_map:
            log.error(
                "Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_port(0).data.get_shape()
        output_shape = node.out_port(0).data.get_shape()

        # normalize node axes to exclude negative indices
        axes_data_value = node.in_port(1).data.get_value()
        axes = int64_array([
            axes_data_value.item()
        ]) if axes_data_value.size == 1 else axes_data_value
        axes = [get_canonical_axis_index(input_shape, a) for a in axes]
        axes = sorted(axes)

        # Check that values in axes list are consecutive
        for idx in range(1, len(axes)):
            if axes[idx] != (axes[idx - 1] + 1):
                log.error(
                    "Reduce with non-consecutive axes {} is not supported".format(axes))
                return
        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axes])
        begin_dims = np.array([input_shape[idx] for idx in range(axes[0])])
        end_dim = np.prod([
            input_shape[idx] for idx in range(axes[-1] + 1, len(input_shape))
        ])

        # 2. Create reshape with appropriate shape
        if len(begin_dims) > 2:
            if 0 not in axes:
                begin_dims = int64_array(
                    [begin_dims[0], np.prod(begin_dims[1:])])
            else:
                begin_dims = int64_array(
                    [np.prod(begin_dims[0:-1]), begin_dims[-1]])
        else:
            # Expand begin_dims to 2
            begin_dims = int64_array(
                np.append(begin_dims, [1] * (2 - len(begin_dims))))

        reshape_shape = np.array([*begin_dims, reduction_dim, end_dim],
                                 dtype=np.int64)
        pool_window = np.array([1, 1, reduction_dim, 1], dtype=np.int64)

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {'name': node.id + '/Reshape'})
        reshape_dim_const_data = Const(graph, {
            'name': node.id + '/Reshape/Dim',
            'value': reshape_shape
        }).create_node_with_data()

        final_reshape_op = Reshape(graph, {'name': node.id + '/FinalReshape'})
        final_reshape_dim_const_data = Const(graph, {
            'name': node.id + '/FinalReshape/Dim',
            'value': output_shape
        }).create_node_with_data()
        pooling_op = Pooling(
            graph,
            dict(name=node.id + '/Pool',
                 window=pool_window,
                 output_spatial_shape=None,
                 batch_dims=int64_array([0]),
                 channel_dims=int64_array([1]),
                 exclude_pad='false',
                 pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        final_reshape_op.create_node_with_data(inputs=[
            pooling_op.create_node_with_data(inputs=[
                reshape_op.create_node_with_data(
                    inputs=[input_data, reshape_dim_const_data])
            ]), final_reshape_dim_const_data
        ],
                                               data_nodes=output_data)

        # convert batch dimension to 0 to produce reshape-able IR over the batch dimension
        if 0 not in axes:
            reshape_dim_const_data.in_node(0).value[0] = 0
            final_reshape_dim_const_data.in_node(0).value[0] = 0

        # 4. If it is reduction with summation, we need to multiply by size of the reduction slice with Mul op
        if reduce_type == 'ReduceSum':
            output_data.in_node().insert_node_with_data_after(
                output_data, AttributedPower, {
                    'name': node.name + '/Mul',
                    'scale': float(reduction_dim)
                })
Example #7
def build_graph_with_edge_attrs(nodes_attrs: dict, edges: list, update_attributes: dict = None,
                                cli: Namespace = Namespace(static_shape=False, data_type='FP32')):
    """
    Build the Graph with specific nodes and edges.
    :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes.
    :param edges: list of triples (start node name, end node name, dictionary with edge attributes).
    :param update_attributes: optional dictionary which specifies nodes names and their attributes to be updated. The
    key is a node name to update attribute and the value is a dictionary with attribute name and its value.
    :param cli: Namespace with cli keys to associate with the graph
    :return: generated graph.
    """
    graph = Graph()
    for node_1, node_2, attr in edges:
        if node_1 not in graph.nodes():
            graph.add_node(node_1, **deepcopy(nodes_attrs[node_1]))
        if node_2 not in graph.nodes():
            graph.add_node(node_2, **deepcopy(nodes_attrs[node_2]))
        graph.add_edge(node_1, node_2, **attr)
    if update_attributes is not None:
        for node_name, new_attrs in update_attributes.items():
            assert (node_name in graph.nodes())
            for attr, value in new_attrs.items():
                graph.node[node_name][attr] = value

    for node in graph.get_op_nodes():
        # Add in_ports attribute
        in_edges = node.in_edges()
        for attr in in_edges.values():
            node.add_input_port(idx=attr['in'])

        # Add out_ports attribute
        out_edges = node.out_edges()
        for attr in out_edges.values():
            node.add_output_port(idx=attr['out'])

    graph.graph['cmd_params'] = cli
    return graph
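A hedged usage sketch for this variant (hypothetical node set): compared to the helper in Example #4, it also registers input/output ports from the 'in'/'out' edge attributes and attaches the cli Namespace to graph.graph['cmd_params'].

nodes = {
    'const': {'kind': 'op', 'op': 'Const'},
    'const_data': {'kind': 'data', 'shape': None},
}
edges = [('const', 'const_data', {'out': 0})]
# Namespace comes from argparse, as in the signature above.
graph = build_graph_with_edge_attrs(
    nodes, edges, cli=Namespace(static_shape=True, data_type='FP16'))
assert graph.graph['cmd_params'].static_shape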
Example #8
def emit_ir(graph: Graph, argv: argparse.Namespace):
    NormalizeTI().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(
        graph,
        RemoveConstOps().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(
        graph,
        CreateConstNodesReplacement().find_and_replace_pattern)

    if 'feManager' in argv:
        del argv.feManager

    mean_data = deepcopy(graph.graph['mf']) if 'mf' in graph.graph else None
    input_names = deepcopy(
        graph.graph['input_names']) if 'input_names' in graph.graph else []

    prepare_emit_ir(graph=graph,
                    data_type=graph.graph['cmd_params'].data_type,
                    output_dir=argv.output_dir,
                    output_model_name=argv.model_name,
                    mean_data=mean_data,
                    input_names=input_names,
                    meta_info=get_meta_info(argv),
                    use_temporary_path=True)

    # This graph cleanup is required to avoid double memory consumption
    graph.clear()

    if not (argv.framework == 'tf'
            and argv.tensorflow_custom_operations_config_update):
        output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd()
        orig_model_name = os.path.normpath(
            os.path.join(output_dir, argv.model_name))

        return_code = "not executed"
        # This try-except is an additional safeguard so that the IE
        # dependency search does not break the MO pipeline
        try:
            if not argv.legacy_ir_generation:
                path_to_offline_transformations = os.path.join(
                    os.path.realpath(os.path.dirname(__file__)), 'back',
                    'offline_transformations.py')
                status = subprocess.run([
                    sys.executable, path_to_offline_transformations,
                    "--input_model", orig_model_name, "--framework",
                    argv.framework, "--transform", argv.transform
                ],
                                        env=os.environ)
                return_code = status.returncode
        except Exception as e:
            return_code = "failed"
            log.error(e)

        message = str(
            dict({
                "platform": platform.system(),
                "mo_version": get_simplified_mo_version(),
                "ie_version": get_simplified_ie_version(env=os.environ),
                "python_version": sys.version,
                "return_code": return_code
            }))
        t = tm.Telemetry()
        t.send_event('mo', 'offline_transformations_status', message)

        if return_code != 0:
            raise Error("offline transformations step has failed.")

        for suf in [".xml", ".bin", ".mapping"]:
            # remove existing files
            path_to_file = orig_model_name + "_tmp" + suf
            if os.path.exists(path_to_file):
                os.remove(path_to_file)

        # add meta information to IR
        append_ir_info(file=orig_model_name,
                       meta_info=get_meta_info(argv),
                       mean_data=mean_data,
                       input_names=input_names)

        print('[ SUCCESS ] Generated IR version {} model.'.format(
            get_ir_version(argv)))
        print('[ SUCCESS ] XML file: {}.xml'.format(orig_model_name))
        print('[ SUCCESS ] BIN file: {}.bin'.format(orig_model_name))

    return 0
Example #9
def merge_nodes(graph: Graph,
                nodes_to_merge_names: list,
                inputs_desc: list = None,
                outputs_desc: list = None):
    """
    Merges nodes specified in the list 'nodes_to_merge_names' into one mega-node, creating new edges between the
    mega-node and the input/output nodes of the mega-node. The added edges contain the names of the input/output nodes,
    which will be used for generation of placeholders and will be saved to the IR xml so the IE plug-in knows how to
    map input/output data for the layer. The function also adds protobufs of the sub-graph nodes and of the 'Const' ops
    consumed by nodes in the sub-graph to the node's 'pbs' attribute.
    :param graph: the graph object to operate on.
    :param nodes_to_merge_names: list of nodes names that should be merged into a single node.
    :param inputs_desc: optional list describing input nodes order.
    :param outputs_desc: optional list describing output nodes order.
    """
    if not is_connected_component(graph, nodes_to_merge_names):
        log.warning(
            "The following nodes do not form connected sub-graph: {}".format(
                nodes_to_merge_names))
        # graph.dump_graph_for_graphviz(nodes_to_dump=nodes_to_merge_names)

    new_node_name = graph.unique_id("TFSubgraphCall_")
    log.info("Create new node with name '{}' for nodes '{}'".format(
        new_node_name, ', '.join(nodes_to_merge_names)))
    graph.add_node(new_node_name)
    new_node_attrs = graph.node[new_node_name]

    new_node_attrs['name'] = new_node_name
    set_tf_custom_call_node_attrs(new_node_attrs)
    new_node = Node(graph, new_node_name)

    # set of tensors that were added as inputs to the sub-graph
    added_input_tensors_names = set()
    # key - tensor name, value - out port
    added_new_node_output_tensors = dict()

    for node_name in nodes_to_merge_names:
        node = Node(graph, node_name)
        add_node_pb_if_not_yet_added(node, new_node)
        # TODO: any improvements?
        for in_node_name, edge_attrs in Node(graph, node_name).get_inputs():
            in_node = Node(graph, in_node_name)

            # internal edges between nodes of the sub-graph
            if in_node_name in nodes_to_merge_names:
                add_node_pb_if_not_yet_added(in_node, new_node)
                continue

            # edge outside of sub-graph into sub-graph
            if in_node_name not in nodes_to_merge_names:
                # we cannot use the 'in_node_name' as a protobuf operation name here
                # because the 'in_node_name' could be a sub-graph matched before.
                input_tensor_name = node.pb.input[edge_attrs['in']]
                if input_tensor_name not in added_input_tensors_names:
                    if not new_node.has_port('in', edge_attrs['in']):
                        new_node.add_input_port(edge_attrs['in'])
                    graph.add_edge(
                        in_node_name, new_node_name,
                        **merge_edge_props(
                            {
                                'in':
                                find_input_port(new_node, inputs_desc,
                                                node_name, edge_attrs['in']),
                                'out':
                                edge_attrs['out'],
                                'internal_input_node_name':
                                input_tensor_name,
                                'original_dst_node_name':
                                node_name,
                                'original_dst_port':
                                edge_attrs['in'],
                                'in_attrs': [
                                    'in', 'internal_input_node_name',
                                    'original_dst_node_name',
                                    'original_dst_port', 'placeholder_name'
                                ],
                                'out_attrs': ['out']
                            }, edge_attrs))
                    log.debug(
                        "Creating edge from outside of sub-graph to inside sub-graph: {} -> {}"
                        .format(in_node_name, new_node_name))
                    added_input_tensors_names.add(input_tensor_name)

        # edge from inside sub-graph to outside sub-graph
        for out_node_name, edge_attrs in Node(graph, node_name).get_outputs():
            if out_node_name not in nodes_to_merge_names:
                log.debug(
                    "Creating edge from inside of sub-graph to outside sub-graph: {} -> {}"
                    .format(new_node_name, out_node_name))
                out_name = internal_output_name_for_node(
                    node_name, edge_attrs['out'])
                if out_name not in added_new_node_output_tensors.keys():
                    added_new_node_output_tensors[out_name] = find_output_port(
                        new_node, outputs_desc, node_name, edge_attrs['out'])
                if not new_node.has_port(
                        'out', added_new_node_output_tensors[out_name]):
                    new_node.add_output_port(
                        added_new_node_output_tensors[out_name])
                graph.add_edge(
                    new_node_name, out_node_name,
                    **merge_edge_props(
                        {
                            'in': edge_attrs['in'],
                            'out': added_new_node_output_tensors[out_name],
                            'internal_output_node_name': out_name,
                            'in_attrs': ['in', 'internal_input_node_name'],
                            'out_attrs': ['out', 'internal_output_node_name']
                        }, edge_attrs))
        new_node['output_tensors_names'] = [
            val for val in
            {v: k
             for k, v in added_new_node_output_tensors.items()}.values()
        ]

    # add nodes using the same order as in initial GraphDef so we can dump them to IR in "correct" order
    new_node['nodes_order'] = [
        node for node in graph.graph['initial_nodes_order']
        if node in new_node['pbs'].keys()
    ]

    for n in nodes_to_merge_names:
        if graph.has_node(
                n):  # check if not deleted by another (similar) pattern
            graph.remove_node(n)
    return Node(graph, new_node_name)
Example #10
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['slice']

        input = node.in_node(0)
        output_data = node.out_node()

        # ONNX 10 opset case
        if len(node.in_nodes()) >= 3 and node.has_valid(
                'format') and node['format'] == 'onnx':
            self.convert_onnx_slice_opset10(node)
            return

        # Caffe case
        if not node.has_valid('start') or not node.has_valid('end'):
            return

        begin = node.start
        end = node.end
        axis = node.axis if node.has_valid('axis') else np.arange(begin.size)

        # Check whether the operation uses only one axis or not
        axes_begin = np.zeros(len(input.shape), dtype=np.int32)
        axes_end = np.zeros(len(input.shape), dtype=np.int32)
        ss_begin = np.zeros(len(input.shape), dtype=np.int32)
        ss_end = np.zeros(len(input.shape), dtype=np.int32)
        dims = 0
        axes = np.zeros(begin.size)
        for i in range(len(axis)):
            if begin[i] != 0 or end[i] < input.shape[axis[i]]:
                dims += 1
                axes[i] = 1
                if begin[i] != 0:
                    axes_begin[axis[i]] = 1
                    ss_begin[axis[i]] = begin[i]
                if end[i] < input.shape[axis[i]]:
                    axes_end[axis[i]] = 1
                    ss_end[axis[i]] = end[i]
        axes = np.array(axes, dtype=bool)

        if dims == 1 or dims == 0:
            # If Slice uses only one axis or no axis at all,
            # convert Slice to StridedSlice
            ss = StridedSlice(
                graph,
                dict(new_axis_mask=np.zeros(len(output_data.shape),
                                            dtype=np.int32),
                     shrink_axis_mask=np.zeros(len(output_data.shape),
                                               dtype=np.int32),
                     ellipsis_mask=np.zeros(len(output_data.shape),
                                            dtype=np.int32),
                     begin_mask=axes_begin,
                     end_mask=axes_end))

            convert_negative_indices(ss_begin, input.shape)
            convert_negative_indices(ss_end, input.shape)

            begin_node = Const(graph, {
                'value': ss_begin,
                'force_precision': 'I32'
            }).create_node_with_data()
            end_node = Const(graph, {
                'value': ss_end,
                'force_precision': 'I32'
            }).create_node_with_data()

            ss.create_node_with_data(inputs=[input, begin_node, end_node],
                                     data_nodes=[output_data])
            # Remove unnecessary edges from and to the Slice vertex
            graph.remove_edge(input.id, node.id)
            graph.remove_edge(node.id, output_data.id)
        else:
            # If Slice uses more than one axis, use the Crop layer
            crop = Crop(graph, dict(axis=axis[axes], offset=begin[axes]))
            # creating node with data
            crop.create_node_with_data(inputs=[input],
                                       data_nodes=[output_data])

            # Remove unnecessary edges from and to the Slice vertex
            graph.remove_edge(input.id, node.id)
            graph.remove_edge(node.id, output_data.id)
Example #11
def load_parallel_component(file_descr, graph: Graph, prev_layer_id):
    """
    Load ParallelComponent of the Kaldi model.
    ParallelComponent contains parallel nested networks.
    A VariadicSplit is inserted before the nested networks.
    Outputs of the nested networks are concatenated with a Concat layer.

    :param file_descr: descriptor of the model file
    :param graph: graph with the topology.
    :param prev_layer_id: id of the input layer for the parallel component
    :return: id of the concat layer - last layer of the parallel component layers
    """
    nnet_count = read_token_value(file_descr, b'<NestedNnetCount>')
    log.debug(
        'Model contains parallel component with {} nested networks'.format(
            nnet_count))

    split_points = []
    outputs = []
    inputs = []

    for i in range(nnet_count):
        read_token_value(file_descr, b'<NestedNnet>')
        collect_until_token(file_descr, b'<Nnet>')
        g = Graph()
        load_kalid_nnet1_model(g, file_descr, 'Nested_net_{}'.format(i))

        # input to nnet1 models is of rank 1, but we also insert batch_size into the 0th axis;
        # the 1st axis contains the input_size of the nested subnetwork;
        # we split the input from the main network between the subnetworks
        input_node = Node(g, 'Parameter')
        split_points.append(input_node['shape'][1])
        g.remove_node(input_node.id)

        mapping = {
            node: graph.unique_id(node)
            for node in g.nodes(data=False) if node in graph
        }
        g = nx.relabel_nodes(g, mapping)
        for val in mapping.values():
            g.node[val]['name'] = val
        graph.add_nodes_from(g.nodes(data=True))
        graph.add_edges_from(g.edges(data=True))
        sorted_nodes = tuple(nx.topological_sort(g))

        outputs.append(Node(graph, sorted_nodes[-1]))
        inputs.append(Node(graph, sorted_nodes[0]))

    split_id = graph.unique_id(prefix='NestedNets/VariadicSplit')
    attrs = {
        'out_ports_count': nnet_count,
        'size_splits': split_points,
        'axis': 1,
        'name': split_id
    }
    variadic_split_node = AttributedVariadicSplit(graph, attrs).create_node()
    prev_layer_node = Node(graph, prev_layer_id)
    prev_layer_node.add_output_port(0)
    graph.create_edge(
        prev_layer_node, variadic_split_node, 0, 0,
        create_edge_attrs(prev_layer_id, variadic_split_node.id,
                          prev_layer_id))

    concat_id = graph.unique_id(prefix='Concat')
    graph.add_node(concat_id, parameters=None, op='concat', kind='op')
    concat_node = Node(graph, concat_id)

    # Connect each output of variadic_split_node to each subnetwork's inputs in ParallelComponent
    # and each subnetwork's output to concat_node
    for i, (input_node, output_node) in enumerate(zip(inputs, outputs)):
        output_node.add_output_port(0)
        concat_node.add_input_port(i)
        graph.create_edge(
            output_node, concat_node, 0, i,
            create_edge_attrs(output_node.id, concat_id, output_node.id, i, 0))
        graph.create_edge(
            variadic_split_node, input_node, i, 0,
            create_edge_attrs(variadic_split_node.id, input_node.id,
                              variadic_split_node.id, 0, i))
    return concat_id
Example #12
    def find_and_replace_pattern(self, graph: Graph):
        for sub in graph.get_op_nodes(op='Sub'):
            self.sub_to_add_replacement(sub)
Example #13
    def test_caffe_pb_to_nx_old_styled_multi_input(self):
        proto = caffe_pb2.NetParameter()
        text_format.Merge(proto_str_old_styled_multi_input + layer_proto_str,
                          proto)
        self.assertRaises(Error, caffe_pb_to_nx, Graph(), proto, None)
Example #14
    def replace_op(self, graph: Graph, node: Node):
        if node.use_peephole:
            raise Error(
                "BlockLSTM operation is not supported with `use_peephole`==True. Node: {}"
                "".format(node.soft_get('name')))

        if node.cell_clip != -1:
            raise Error(
                "Clipping is not supported for BlockLSTM operation. `cell_clip`={!s} for node: {}"
                "".format(node.cell_clip, node.soft_get('name')))

        log.debug(
            "Start BlockLSTM->LSTMSequence translation for node: {} with parameters:\n"
            "`cell_clip`={!s}, `use_peephole`=={!s}, `forget_bias`={!s}\n"
            "inputs: {},\noutputs:{}".format(
                node.soft_get('name'), node.cell_clip, node.use_peephole,
                node.forget_bias,
                {p: i.id
                 for p, i in node.in_nodes().items()},
                {p: o.id
                 for p, o in node.out_nodes().items()}))

        log.debug(
            "Cutting all inputs for peephole connection (5, 6, 7 input ports) off, as `use_peephole`=False"
        )

        for p, input_data in node.in_nodes().items():
            if p in [5, 6, 7]:
                key = self.find_key_by_input_port(node.in_node(p), node, p)
                assert key is not None
                graph.remove_edge(node.in_node(p).id, node.id, key=key)

        log.debug("Cutting seq_len_max input off")
        graph.remove_edge(node.in_node(0).id, node.id)
        """
        Reconnecting input edges of LSTMSequence:
        TF input edges:             Description:                 MO input edges:
              1                          input                        0
              4                         weights                       1
              8                         biases                        2
              3               h_prev: initial output of cell          3
              2               cs_prev: initial cell state             4
        """
        inputs = node.in_edges()
        assert 1 in inputs, "Sequence input to the BlockLSTM is required (1 port). Node {}".format(
            node.id)
        assert 2 in inputs, "Value of the initial cell state is required (2 port). Node {}".format(
            node.id)
        assert 3 in inputs, "Initial output of cell is required input to BlockLSTM (3 port). Node {}".format(
            node.id)
        assert 4 in inputs, "The weight matrix is required input to BlockLSTM (4 port) . Node {}".format(
            node.id)
        assert 8 in inputs, "The bias vector is required input to BlockLSTM (8 port). Node {}".format(
            node.id)

        inputs[3]['in'] = 3
        inputs[1]['in'] = 0
        inputs[4]['in'] = 1
        inputs[2]['in'] = 4
        inputs[8]['in'] = 2

        log.debug(
            "Checking for unsupported outputs usage (output ports: 0, 2, 3, 4, 5)"
        )
        for port, input_data in node.out_nodes().items():
            if port in [0, 2, 3, 4, 5]:
                raise Error(
                    "Output port {} of BlockLSTM node {} is not supported".format(
                        port, node.id))
        """
        Reconnecting output edges of LSTMSequence:
        TF output edges:             Description:                 MO output edges:
              6                     output h vector                     0
              1                   cell state before the tanh            1
        """

        outputs = node.out_edges()
        if 6 in outputs:
            outputs[6]['out'] = 0
            node.add_output_port(0, skip_if_exist=True)

        # do not replace any output edge
        return []
Example #15
    def find_and_replace_pattern(self, graph: Graph, feature_channel=None):
        for node in graph.get_op_nodes(is_eltwise=True):
            self.mark_eltwise_node(node)
Example #16
    def find_and_replace_pattern(self, graph: Graph):
        resize11_ops = graph.get_op_nodes(op='ONNXResize11')
        for resize in resize11_ops:
            replace_resize(graph, resize)
Example #17
    def find_and_replace_pattern(self, graph: Graph):
        for quantize_node in graph.get_op_nodes(op='FakeQuantize',
                                                keep_in_IR=True):
            while len(quantize_node.out_port(0).get_destinations()) == 1:
                fuse_node = quantize_node.out_port(0).get_destination().node
                if not fuse_node.has_valid('fuse_up_to_quantize_ports'):
                    break
                quantize_to_mul_in_port_index = quantize_node.out_port(
                    0).get_destination().idx

                # connect the rest of the model after the mul to the quantize node; the mul node now hangs on quantize
                fuse_node.out_port(0).get_connection().set_source(
                    quantize_node.out_port(0))

                # mul node is disconnected from the graph
                fuse_node.in_port(quantize_to_mul_in_port_index).disconnect()

                first_port_fusion = True
                for in_quantize_port in fuse_node['fuse_up_to_quantize_ports']:
                    fuse_node_duplicate = fuse_node
                    if not first_port_fusion:
                        fuse_node_duplicate = fuse_node.copy_node({
                            'in_ports_count':
                            len(fuse_node.in_ports()),
                            'out_ports_count':
                            len(fuse_node.out_ports())
                        })

                    quantize_node.in_port(
                        in_quantize_port).get_connection().set_destination(
                            fuse_node_duplicate.in_port(
                                quantize_to_mul_in_port_index))

                    fuse_node_duplicate.out_port(0).connect(
                        quantize_node.in_port(in_quantize_port))

                    if not first_port_fusion:
                        for idx, port in fuse_node.in_ports().items():
                            if idx == quantize_to_mul_in_port_index:
                                continue
                            port.get_source().connect(
                                fuse_node_duplicate.in_port(idx))
                    fuse_node_duplicate.infer(fuse_node_duplicate)

                    first_port_fusion = False

            if 'permutation' in quantize_node.in_edge(0):
                permutation = quantize_node.in_edge(0)['permutation']
                if permutation is None:
                    continue

                perm_rank = permutation.perm.size

                if not all([
                        quantize_node.in_port(i).data.get_shape().size
                        == perm_rank for i in range(1, 5)
                ]):
                    continue

                for i in range(1, 5):
                    quantize_node.in_edge(i)['permutation'] = permutation
Example #18
    def replace_pattern(graph: Graph, match: dict):
        node = match['result']
        if len(node.in_nodes()) == 0:
            graph.erase_node(node)
Example #19
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['reduce']
        if not node.has_valid('reduce_type') or \
                node.reduce_type.lower() not in self.supported_reduce_types:
            log.error("Reduce type {} is not supported for node {}".format(
                node.soft_get('reduce_type'), node.id))
            return

        reduce_type = node.reduce_type.lower()
        if reduce_type not in self.pool_method_map:
            log.error(
                "Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_node().shape
        output_shape = node.out_node().shape

        # normalize node.axis to exclude negative indices
        node.axis = [
            get_canonical_axis_index(input_shape, a) for a in node.axis
        ]

        axis = node.axis

        # Check that values in axis list are consecutive
        for idx in range(1, len(axis)):
            if axis[idx] != (axis[idx - 1] + 1):
                log.error(
                    "Reduce with non-consecutive axes {} is not supported".format(axis))
                return

        layout = graph.graph['layout']

        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axis])
        begin_dims = np.array([input_shape[idx] for idx in range(axis[0])])
        end_dim = np.prod([
            input_shape[idx] for idx in range(axis[-1] + 1, len(input_shape))
        ])

        # 2. Create reshape with appropriate shape
        if layout == 'NCHW':
            if len(begin_dims) > 2:
                begin_dims = np.array(
                    [np.prod(begin_dims[0:-1]), begin_dims[-1]],
                    dtype=np.int64)
            else:
                # Expand begin_dims to 2
                begin_dims = np.array(np.append(begin_dims,
                                                [1] * (2 - len(begin_dims))),
                                      dtype=np.int64)
            reshape_shape = np.array([*begin_dims, reduction_dim, end_dim],
                                     dtype=np.int64)
            pool_window = np.array([1, 1, reduction_dim, 1], dtype=np.int64)
        elif layout == 'NHWC':
            begin_dims = np.prod(begin_dims)
            reshape_shape = np.array([begin_dims, reduction_dim, 1, end_dim],
                                     dtype=np.int64)
            pool_window = np.array([1, reduction_dim, 1, 1], dtype=np.int64)
        else:
            log.error('{} layout currently is not supported'.format(layout))
            return

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {
            'name': node.id + '/Reshape',
            'dim': reshape_shape
        })
        final_reshape_op = Reshape(graph, {
            'name': node.id + '/FinalReshape',
            'dim': output_shape
        })
        pooling_op = Pooling(
            graph,
            dict(name=node.id + '/Pool',
                 window=pool_window,
                 output_spatial_shape=None,
                 batch_dims=np.array([get_batch_dim(layout, 4)],
                                     dtype=np.int64),
                 channel_dims=np.array([get_features_dim(layout, 4)],
                                       dtype=np.int64),
                 exclude_pad='false',
                 pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        final_reshape_op.create_node_with_data(inputs=[
            pooling_op.create_node_with_data(
                inputs=[reshape_op.create_node_with_data(inputs=[input_data])])
        ],
                                               data_nodes=output_data)

        # 4. If it is reduction with summation, we need to multiply by size of the reduction slice with Mul op
        if reduce_type == 'sum':
            output_data.in_node().insert_node_with_data_after(
                output_data, Power, {
                    'name': node.name + '/Mul',
                    'scale': float(reduction_dim)
                })
Example #20
    def find_and_replace_pattern(self, graph: Graph):
        for node in list(graph.get_op_nodes(op='Result')):
            if len(node.in_nodes()) > 0:
                assert len(node.in_nodes()) == 1
            graph.remove_node(node.id)
Example #21
def build_graph(nodes_attrs: dict,
                edges: list,
                update_attributes: dict = None,
                nodes_with_edges_only: bool = False):
    """
    Build the Graph with specific nodes and edges.
    :param nodes_attrs: dictionary where key is the node name and the value is the dictionary with node attributes.
    :param edges: list of pairs or triples (start node name, end node name, optional dictionary with edge attributes).
    :param update_attributes: optional dictionary which specifies nodes names and their attributes to be updated. The
    key is a node name to update attribute and the value is a dictionary with attribute name and its value.
    :param nodes_with_edges_only: add only nodes which have at least one incoming or outgoing edge.
    :return: generated graph.
    """
    graph = Graph()

    for node_name, attrs in nodes_attrs.items():
        if 'name' not in attrs:
            attrs['name'] = node_name

    if nodes_with_edges_only:
        # filter nodes to keep only ones with edges connected
        filtered_nodes = {}
        for item in edges:
            if len(item) == 2:  # TODO: is there any better way in python to do that?
                node1, node2 = item
            else:
                node1, node2, _ = item
            filtered_nodes[node1] = nodes_attrs[node1]
            filtered_nodes[node2] = nodes_attrs[node2]
        nodes_attrs = filtered_nodes

    # create all nodes first
    for node, attrs in nodes_attrs.items():
        assert node not in graph.nodes()
        graph.add_node(node, **deepcopy(attrs))

    # connect nodes with edges
    for item in edges:
        if len(item) == 2:  # TODO: is there any better way in python to do that?
            node_1, node_2 = item
            edge_attrs = {}
        else:
            node_1, node_2, edge_attrs = item

        common_attrs = {
            'in': len(graph.in_edges(node_2)),
            'out': len(graph.out_edges(node_1)),
            'name': nodes_attrs[node_1]['name']
        }
        common_attrs.update(edge_attrs)
        graph.add_edge(node_1, node_2, **common_attrs)

    if update_attributes is not None:
        for node_name, new_attrs in update_attributes.items():
            assert (node_name in graph.nodes(
            )), 'Node with name "{}" is not in the graph'.format(node_name)
            for attr, value in new_attrs.items():
                graph.node[node_name][attr] = value

    for node in graph.get_op_nodes():
        # Add in_ports attribute
        in_edges = node.in_edges()
        for attr in in_edges.values():
            node.add_input_port(idx=attr['in'])

        # Add out_ports attribute
        out_edges = node.out_edges()
        for attr in out_edges.values():
            node.add_output_port(idx=attr['out'])

    graph.graph['cmd_params'] = Namespace(keep_shape_ops=False)
    return graph
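A small usage sketch (illustrative node set only): build_graph accepts bare (src, dst) pairs and derives the 'in'/'out' edge indices from the number of edges already connected to each node.

nodes = {
    'placeholder': {'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {'kind': 'data', 'shape': [1, 3, 224, 224]},
    'result': {'kind': 'op', 'op': 'Result'},
    'unused': {'kind': 'op', 'op': 'Const'},
}
edges = [
    ('placeholder', 'placeholder_data'),
    ('placeholder_data', 'result'),
]
# 'unused' is dropped because nodes_with_edges_only=True keeps only connected nodes.
graph = build_graph(nodes, edges, nodes_with_edges_only=True)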
Example #22
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(type='Const'):
            graph.remove_edge(node.id, node.out_node().id)
            graph.remove_node(node.id)
Example #23
def build_graph_with_attrs(nodes_with_attrs: list,
                           edges_with_attrs: list,
                           new_nodes_with_attrs: list = [],
                           new_edges_with_attrs: list = [],
                           update_edge_attrs: dict = None,
                           update_nodes_attributes: list = None,
                           nodes_with_edges_only: bool = False,
                           add_nodes_from_edges: bool = False):
    """
    Build the Graph with specific nodes and edges. Also update of edge and node parameters is supported.
    :param nodes_with_attrs: list of tuples ('node_name', {node_attrs})
    :param edges_with_attrs: list of tuples like (start node, end node, (optional) {attrs of the edge}).
    :param new_nodes_with_attrs: same format as nodes_with_attrs
    :param new_edges_with_attrs: same format as edges_with_attrs
    :param update_edge_attrs: optional dictionary like {('from_node', 'to_node', key): {edge_attrs}}.
    :param update_nodes_attributes: optional list of tuples (node name, dictionary with attributes to update) which
    specifies the nodes whose attributes should be updated.
    :param nodes_with_edges_only: add only nodes which have at least one incoming or outgoing edge.
    :param add_nodes_from_edges: whether nodes that are not listed in the node lists but appear in the edge lists are
    allowed to be added.
    :return: generated graph.
    """
    if not_all_new([node[0] for node in nodes_with_attrs],
                   [node[0] for node in new_nodes_with_attrs]):
        raise Error('Some nodes from new_nodes_with_attrs are already in nodes.'
                    ' Please add only NEW nodes to new_nodes_with_attrs.')

    if not_all_new([(edge[0], edge[1]) for edge in edges_with_attrs],
                   [(edge[0], edge[1]) for edge in new_edges_with_attrs]):
        raise Error('Some edges from new_edges_with_attrs are already in edges.'
                    ' Please add only NEW edges to new_edges_with_attrs.')

    # Check that all nodes from list of edges are in nodes
    all_nodes = nodes_with_attrs + new_nodes_with_attrs
    all_edges = edges_with_attrs + new_edges_with_attrs
    all_nodes_names = [node[0] for node in all_nodes]
    if not add_nodes_from_edges and not all_edges_in_nodes(
            nodes=all_nodes_names, edges=all_edges):
        raise Error('Some nodes from the list of edges are not in the list of nodes. '
                    'Please add all necessary nodes.')

    graph = Graph()

    # Create dict for nodes with attrs
    nodes_attrs = {}
    for node_name, attrs in all_nodes:
        nodes_attrs[node_name] = attrs
        if 'name' not in attrs:
            attrs['name'] = node_name

    if nodes_with_edges_only:
        # filter nodes to keep only ones with edges connected
        filtered_nodes = {}
        for edge in all_edges:
            node_1, node_2 = edge[0], edge[1]
            filtered_nodes[node_1] = nodes_attrs[node_1]
            filtered_nodes[node_2] = nodes_attrs[node_2]
        nodes_attrs = filtered_nodes

    # Create all nodes
    for node, attrs in nodes_attrs.items():
        graph.add_node(node, **deepcopy(attrs))

    # Connect nodes with edges (also unpack edge params)
    for edge in all_edges:
        node_1, node_2 = edge[0], edge[1]
        edge_attrs = edge[2] if len(edge) == 3 else {}
        graph.add_edge(node_1, node_2, **edge_attrs)

    # Update attributes of edges
    if update_edge_attrs:
        # this works with networkx 2.x only
        for edge, attr in update_edge_attrs.items():
            for k, v in attr.items():
                nx.set_edge_attributes(G=graph, name=k, values={edge: v})

    # Update attributes of nodes
    if update_nodes_attributes is not None:
        for node_name, new_attrs in update_nodes_attributes:
            assert (node_name in graph.nodes())
            for attr, value in new_attrs.items():
                graph.node[node_name][attr] = value

    for node_id in graph.nodes():
        node = Node(graph, node_id)
        check_and_update_ports(node,
                               [graph.get_edge_data(edge[0], node_id)[0] for edge in graph.in_edges(node_id)],
                               True)
        check_and_update_ports(node,
                               [graph.get_edge_data(node_id, edge[1])[0] for edge in graph.out_edges(node_id)],
                               False)

    for node in graph.get_op_nodes():
        # Add in_ports attribute
        in_edges = node.in_edges()
        for i in range(len(in_edges)):
            node.add_input_port(idx=i)

        # Add out_ports attribute
        out_edges = node.out_edges()
        for i in range(len(out_edges)):
            node.add_output_port(idx=i)
    return graph
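# Minimal usage sketch for build_graph_with_attrs() above (the node names, ops and edge attributes
# here are made up for illustration): two op nodes connected by one edge; the ports are
# checked/created from the 'in'/'out' attributes of the edge.
example_nodes = [
    ('input', {'kind': 'op', 'op': 'Parameter'}),
    ('relu', {'kind': 'op', 'op': 'ReLU'}),
]
example_edges = [('input', 'relu', {'in': 0, 'out': 0})]
example_graph = build_graph_with_attrs(nodes_with_attrs=example_nodes,
                                       edges_with_attrs=example_edges)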
Example #24
def copy_graph_with_ops(graph: Graph) -> Graph:
    """
    Copy the graph and apply extenders to the appropriate nodes.
    :param graph: graph to copy
    :return: copied graph with applied extenders
    """
    new_graph = Graph()
    new_graph.stage = 'back'
    new_graph.graph = graph.graph

    node_connections = dict()
    mapping_of_old_idx_into_new = dict()

    restore_correct_ports(graph)

    # Nodes preprocessing stage in source graph
    # First, propagate values only for Const nodes, because the other preprocessing steps
    # assume that Const nodes have already been preprocessed.
    for op in graph.get_op_nodes(type='Const'):
        preprocessing_op_nodes[op.type](op)

    for op in graph.get_op_nodes():
        if op.soft_get('type') != 'Const' and op.soft_get('type') in preprocessing_op_nodes:
            preprocessing_op_nodes[op.type](op)

    # Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
    for op in graph.get_op_nodes():

        # Apply extenders to nodes in source graph
        if op.type in Extender.registered_ops:
            Extender.get_extender_class_by_name(op.type).extend(op)
        else:
            log.debug('Extender for node {} with type={} not found, please note.'.format(op.name, op.type))

        # Add node with necessary type and extended attrs in new graph
        op_type = op.soft_get('type_to_create', op.type)

        if op_type in custom_ops:
            node = custom_ops[op_type](new_graph, op.attrs()).create_node()
        else:
            assert op_type in Op.registered_ops, 'Operation {} not found in MO operations, ' \
                                                 'please check it!'.format(op_type)
            node = Op.get_op_class_by_name(op_type)(new_graph, op.attrs()).create_node()

        if op.has_and_set('need_copy_input_blobs'):
            copy_input_blobs(op, node)

        # Collect node connections
        mapping_of_old_idx_into_new[op.id] = node.id
        node_connections[op.id] = collect_node_outputs(op)

    # Restore connections in new graph
    for input_node_idx, its_outputs in list(node_connections.items()):
        for out_port_idx, out_port_dest in its_outputs.items():
            for dest_in_port_idx, dest_node_idx in out_port_dest:
                src = Node(new_graph, mapping_of_old_idx_into_new[input_node_idx])
                dst = Node(new_graph, mapping_of_old_idx_into_new[dest_node_idx])
                src.out_port(out_port_idx).connect(dst.in_port(dest_in_port_idx))

    # Nodes postprocessing stage in new graph
    for op in new_graph.get_op_nodes():
        restore_tensor_names(op)

        # operations postprocessing with some special types
        if op.soft_get('type') in postprocessing_op_nodes:
            postprocessing_op_nodes[op.type](op)

    # clean up the graph before shape inference
    new_graph.clean_up()

    return new_graph
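# Illustrative sketch of the data consumed by the connection-restore loop above (the exact
# structure returned by collect_node_outputs() is an assumption inferred from the unpacking in the
# loop, not taken from its implementation): node_connections maps an old node id to
# {out_port_idx: [(dest_in_port_idx, dest_node_old_idx), ...]}, and mapping_of_old_idx_into_new
# translates old node ids into the ids of the nodes created in new_graph.
example_node_connections = {'conv_old': {0: [(0, 'relu_old')]}}
example_mapping = {'conv_old': 'conv_new', 'relu_old': 'relu_new'}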
Example #25
    def replace_pattern(self, graph: Graph, match: dict):
        relu = match['relu']
        reshape1 = match['reshape1']
        reshape2_data = match['reshape2_data']
        conv = match['conv']

        if np.max(conv.pad) == 0:
            return

        relu_input = relu.in_node()

        # Disconnect InputData-x->ReLU->Data-x->Reshape1
        edge_attrs = graph.get_edge_data(relu.out_node().id, reshape1.id)[0]
        graph.remove_edge(relu_input.id, relu.id)
        graph.remove_edge(relu.out_node().id, reshape1.id)

        # Connect InputData-->Reshape1
        graph.add_edges_from([(relu_input.id, reshape1.id, edge_attrs)])

        # Insert ReLU:  Reshape2Data->ReLU->Data->Convolution
        edge_attrs = graph.get_edge_data(reshape2_data.id, conv.id)[0]
        graph.remove_edge(reshape2_data.id, conv.id)
        graph.add_edges_from([(reshape2_data.id, relu.id, {
            'in': 0
        }), (relu.out_node().id, conv.id, edge_attrs)])
Example #26
def convert_scale_shift_to_mul_add(graph: Graph):
    nodes = graph.get_op_nodes(op='ScaleShift')
    for node in nodes:
        if node.soft_get('can_be_fused') is False:
            continue

        ports_count = len(node.in_ports())

        input_port = node.in_port(0)
        scale_port = node.in_port(1) if ports_count > 1 and not node.in_port(1).disconnected() else None
        shift_port = node.in_port(2) if ports_count > 2 and not node.in_port(2).disconnected() else None
        output_port = node.out_port(0)

        has_biases = True
        has_weights = True

        # We don't need zero biases
        if shift_port is None or (
                shift_port.data.get_value() is not None
                and all([x == 0 for x in shift_port.data.get_value()])):
            has_biases = False

        # We don't need weights with ones
        if scale_port is None or (
                scale_port.data.get_value() is not None
                and all([x == 1 for x in scale_port.data.get_value()])):
            has_weights = False

        mul_op = Mul(graph, dict(name=node.name + "/Mul_"))
        add_op = Add(graph, dict(name=node.name + "/Add_"))

        # Expand dims for current layout
        broadcast_dims_cnt = len(input_port.data.get_shape()) - 2 if graph.graph['layout'] == 'NCHW' else 0

        # If the weights/biases are constant values, broadcast them according to the graph layout;
        # otherwise insert a Reshape node with the computed broadcast dimensions.
        def broadcast_value(port):
            value = np.array(port.data.get_value())
            for idx in range(broadcast_dims_cnt):
                value = np.expand_dims(value, axis=-1)
            port.data.set_value(value)

        def broadcast_with_reshape(port):
            input_shape = input_port.data.get_shape()
            reshape_dims = np.zeros(len(input_shape), dtype=np.int64)
            for i in range(0, node.axis):
                reshape_dims[i] = 1
            data_shape = port.data.get_shape()
            for i in range(node.axis, node.axis + len(data_shape)):
                reshape_dims[i] = data_shape[i - node.axis]
            for i in range(node.axis + len(data_shape), len(input_shape)):
                reshape_dims[i] = 1
            reshape = create_op_node_with_second_input(
                graph, Reshape, reshape_dims,
                dict(name=port.node.name + "/Broadcast_"))
            port.get_connection().set_destination(reshape.in_port(0))
            reshape.out_port(0).connect(port)

        if has_weights and scale_port.data.get_value() is not None:
            broadcast_value(scale_port)
        elif has_weights:
            broadcast_with_reshape(scale_port)

        if has_biases and shift_port.data.get_value() is not None:
            broadcast_value(shift_port)
        elif has_biases:
            broadcast_with_reshape(shift_port)

        if has_biases and has_weights:
            # Connect input->mul->out->add->out
            add_node = add_op.create_node()
            mul_node = mul_op.create_node()

            # Connect Mul operation with inputs
            input_port.get_connection().set_destination(mul_node.in_port(0))
            scale_port.get_connection().set_destination(mul_node.in_port(1))

            # Connect Add operation with inputs
            mul_node.out_port(0).connect(add_node.in_port(0))
            shift_port.get_connection().set_destination(add_node.in_port(1))

            output_port.get_connection().set_source(add_node.out_port(0))
        elif has_weights:
            # Connect input->mul->out
            mul_node = mul_op.create_node()

            # Connect Mul operation with inputs
            input_port.get_connection().set_destination(mul_node.in_port(0))
            scale_port.get_connection().set_destination(mul_node.in_port(1))

            output_port.get_connection().set_source(mul_node.out_port(0))
        elif has_biases:
            # Connect input->add->out
            add_node = add_op.create_node()

            # Connect Add operation with inputs
            input_port.get_connection().set_destination(add_node.in_port(0))
            shift_port.get_connection().set_destination(add_node.in_port(1))

            output_port.get_connection().set_source(add_node.out_port(0))
        else:
            # Connect input->out
            producer_port = input_port.get_source()
            input_port.disconnect()
            output_port.get_connection().set_source(producer_port)
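# Worked sketch of how broadcast_with_reshape() above computes the reshape dimensions (the concrete
# shapes and axis value are hypothetical): for an NCHW input of shape [1, 3, 224, 224], a scale of
# shape [3] and axis == 1, the scale is reshaped to [1, 3, 1, 1] so it aligns with the channel dim.
import numpy as np

input_shape = np.array([1, 3, 224, 224])
data_shape = np.array([3])   # shape of the scale (weights) tensor
axis = 1                     # hypothetical ScaleShift axis

reshape_dims = np.zeros(len(input_shape), dtype=np.int64)
reshape_dims[:axis] = 1                                 # dims before the axis become 1
reshape_dims[axis:axis + len(data_shape)] = data_shape  # copy the scale dims starting at the axis
reshape_dims[axis + len(data_shape):] = 1               # trailing dims become 1
assert list(reshape_dims) == [1, 3, 1, 1]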
Example #27
    def find_and_replace_pattern(self, graph: Graph):
        visited = set()
        marked_nodes = set()
        condition_forward = lambda n: not InsertLayoutPropagationTranspose.is_nhwc_to_nchw_transpose_needed(n)
        condition_backward = lambda n: not InsertLayoutPropagationTranspose.is_nchw_to_nhwc_transpose_needed(n)
        for node_condition in self.op_conditions:
            for node in graph.get_op_nodes():
                if node_condition(node):
                    log.debug(
                        'Detected node "{}" as a node which should be executed in the original layout'
                        ''.format(node.soft_get('name', node.id)))
                    forward_visited_nodes = self.bfs([node], visited,
                                                     condition_forward, True)
                    backward_visited_nodes = self.bfs([node], visited,
                                                      condition_backward,
                                                      False)

                    # find "reinterp_shape" like ops which change rank of input to 4D or 5D from smaller dimensions
                    for back_node in backward_visited_nodes:
                        for input_node in self.get_input_nodes(back_node):
                            if input_node not in backward_visited_nodes and not condition_forward(
                                    input_node):
                                marked_nodes.add(input_node)

                    # find "reinterp_shape" like ops which change rank of input from 4D or 5D to smaller dimensions
                    for forward_node in forward_visited_nodes:
                        for output_node in self.get_output_nodes(forward_node):
                            if output_node not in forward_visited_nodes and not condition_backward(
                                    output_node):
                                marked_nodes.add(output_node)

                    marked_nodes.update(forward_visited_nodes +
                                        backward_visited_nodes)

        if len(marked_nodes):
            log.debug(
                'The following nodes will be executed in the original layout: {}'
                ''.format([n.soft_get('name', n.id) for n in marked_nodes]))

            # mark all matched nodes as in correct layout and disable attributes permutation for them
            for visited_node in marked_nodes:
                mark_as_correct_data_layout(visited_node)
                visited_node['nchw_layout'] = True

        _, nodes_weights, nodes_in_weights = self.get_ports_and_nodes_on_weights(graph)
        for node in nodes_weights:
            if node in nodes_in_weights:
                for ind, port in node.in_ports().items():
                    if ind not in nodes_in_weights[node]:
                        mark_input_as_in_correct_layout(node, ind)
                for ind, port in node.out_ports().items():
                    mark_output_as_in_correct_layout(node, ind)
            else:
                mark_as_correct_data_layout(node)
            node['nchw_layout'] = True

        for node in self.get_ports_and_nodes_on_shape_subgraphs(graph)[1]:
            mark_as_correct_data_layout(node)
            node['nchw_layout'] = True
Example #28
    def test_component_map_loading_offset(self):
        test_map = "input-node name=input dim=16\n" + \
                   "component-node name=lda component=lda input=Offset(input, -3)\n" + \
                   "component-node name=tdnn1.affine component=tdnn1.affine input=Append(Offset(input, -1), Offset(lda, 1))\n" + \
                   "component-node name=tdnn1.relu component=tdnn1.relu input=tdnn1.affine\n" + \
                   "\n"
        graph = Graph(name="test_graph_component_map_loading_offset")

        test_top_map = load_topology_map(io.BytesIO(bytes(test_map, 'ascii')),
                                         graph)

        ref_map = {
            b"lda": ["lda"],
            b"tdnn1.affine": ["tdnn1.affine"],
            b"tdnn1.relu": ["tdnn1.relu"]
        }
        self.assertEqual(test_top_map, ref_map)
        self.assertTrue("input" in graph.nodes())
        self.assertListEqual(list(Node(graph, 'input')['shape']), [1, 16])

        ref_graph = build_graph(
            {
                'input': {
                    'shape': np.array([1, 16]),
                    'kind': 'op',
                    'op': 'Parameter'
                },
                'lda': {
                    'kind': 'op'
                },
                'tdnn1.affine': {
                    'kind': 'op'
                },
                'tdnn1.relu': {
                    'kind': 'op'
                },
                'append_input_lda': {
                    'kind': 'op',
                    'op': 'Concat'
                },
                'offset_in_input_3': {
                    'kind': 'op',
                    'op': 'memoryoffset',
                    't': -3,
                    'pair_name': 'offset_out_input_3'
                },
                'offset_in_input_1': {
                    'kind': 'op',
                    'op': 'memoryoffset',
                    't': -1,
                    'pair_name': 'offset_out_input_1'
                },
                'offset_in_lda_1': {
                    'kind': 'op',
                    'op': 'memoryoffset',
                    't': -1,
                    'pair_name': 'offset_out_lda_1'
                },
            }, [
                ('input', 'offset_in_input_3', {
                    'out': 0
                }),
                ('offset_in_input_3', 'lda', {
                    'out': 0
                }),
                ('lda', 'offset_in_lda_1', {
                    'out': 0
                }),
                ('input', 'offset_in_input_1', {
                    'out': 1
                }),
                ('offset_in_lda_1', 'append_input_lda', {
                    'in': 1,
                    'out': 0
                }),
                ('offset_in_input_1', 'append_input_lda', {
                    'in': 0,
                    'out': 0
                }),
                ('append_input_lda', 'tdnn1.affine', {
                    'out': 0
                }),
                ('tdnn1.affine', 'tdnn1.relu', {
                    'out': 0
                }),
            ])

        (flag, resp) = compare_graphs(graph, ref_graph, 'tdnn1.relu')
        self.assertTrue(flag, resp)
Example #29
    def find_and_replace_pattern(self, graph: Graph):
        for node in graph.get_op_nodes(op='SpaceToBatch') + graph.get_op_nodes(op='BatchToSpace'):
            node.add_input_port(3, skip_if_exist=True)

            # convert TF representation of the pads/crops as [N, 2] to IE representation: [N] and [N]
            transposed_pads = create_op_with_const_inputs(
                graph, Transpose, {1: int64_array([1, 0])})
            node.in_port(2).get_connection().set_destination(
                transposed_pads.in_port(0))
            split_pads = create_op_with_const_inputs(graph, Split,
                                                     {1: int64_array(0)},
                                                     {'num_splits': 2})
            transposed_pads.out_port(0).connect(split_pads.in_port(0))
            for port_ind in range(2):
                node.in_port(port_ind + 2).connect(
                    split_pads.out_port(port_ind))
                node.in_port(port_ind + 2).get_connection().insert_node(
                    create_op_with_const_inputs(graph, Squeeze,
                                                {1: int64_array([0])}))

            # add zeros/ones to the related inputs to align them with the data input
            in0_rank = Rank(graph, {
                'name': node.name + '/rank_0'
            }).create_node()
            in1_shape = Shape(graph, {
                'name': node.name + '/rank_1'
            }).create_node()

            diff_size = Sub(graph, {
                'name': node.name + '/sub_0'
            }).create_node()
            diff = Sub(graph, {'name': node.name + '/sub_1'}).create_node()
            const_begin = Const(graph, {
                'value': int64_array([1])
            }).create_node()
            const_pad_val = Const(graph, {
                'value': int64_array(1)
            }).create_node()

            block_shape = Pad(graph, {
                'name': node.name + '/aligned_block_shape',
                'mode': 'constant'
            }).create_node()

            # in case of SpaceToBatch begin = pads_begin, end = pads_end
            # in case of BatchToSpace begin = crops_begin, end = crops_end
            new_begin_name = '/aligned_pads_begin'
            new_end_name = '/aligned_pads_end'
            if node.type == 'BatchToSpace':
                new_begin_name = '/aligned_crops_begin'
                new_end_name = '/aligned_crops_end'

            begin = Pad(graph, {
                'name': node.name + new_begin_name,
                'mode': 'constant'
            }).create_node()
            end = Pad(graph, {
                'name': node.name + new_end_name,
                'mode': 'constant'
            }).create_node()

            in0_rank_1d = create_op_node_with_second_input(
                graph, Unsqueeze, int64_array([0]),
                {'name': node.name + '/1d_rank_of_0'}, in0_rank)

            node.in_port(0).get_source().connect(in0_rank.in_port(0))
            node.in_port(1).get_source().connect(in1_shape.in_port(0))
            in0_rank_1d.out_port(0).connect(diff_size.in_port(0))
            in1_shape.out_port(0).connect(diff_size.in_port(1))
            diff_size.out_port(0).connect(diff.in_port(0))
            const_begin.out_port(0).connect(diff.in_port(1))
            const_pad_val.out_port(0).connect(block_shape.in_port(3))

            inputs_array = [block_shape, begin, end]
            for idx, input_to_node in enumerate(inputs_array):
                name_of_input_to_node = input_to_node.name
                node.in_port(idx + 1).get_connection().set_destination(
                    input_to_node.in_port(0))
                const_begin.out_port(0).connect(input_to_node.in_port(1))
                diff.out_port(0).connect(input_to_node.in_port(2))
                input_to_node.out_port(0).connect(node.in_port(idx + 1))
                convert = Cast(graph, {
                    'name': name_of_input_to_node + '/i64',
                    'dst_type': np.int64
                }).create_node()
                input_to_node.in_port(0).get_connection().insert_node(convert)
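# Worked sketch of the pads/crops conversion above (the values are hypothetical): a TF-style pads
# tensor of shape [N, 2] is transposed to [2, N] and split along axis 0, which after the Squeeze
# yields separate begin and end vectors of shape [N].
import numpy as np

tf_pads = np.array([[1, 2], [3, 4]])                    # [N, 2] with N = 2 spatial dims
transposed = tf_pads.transpose([1, 0])                  # [[1, 3], [2, 4]]
pads_begin, pads_end = np.split(transposed, 2, axis=0)  # two [1, N] slices
assert list(pads_begin.squeeze(0)) == [1, 3]
assert list(pads_end.squeeze(0)) == [2, 4]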
Example #30
    def find_and_replace_pattern(self, graph: Graph):
        for permute_node in graph.get_op_nodes(type='Transpose'):
            if permute_node.id not in graph.nodes():
                continue

            list_of_permutes = [permute_node]
            # Get sequence of permutations
            node = permute_node
            while True:
                next_ops = get_next_operation(node)
                if len(next_ops) != 1:
                    break

                next_op = next_ops[0]
                if next_op.soft_get('type') == 'Transpose':
                    list_of_permutes.append(next_op)
                    node = next_op
                else:
                    break

            final_permutation = int64_array(
                list(range(len(list_of_permutes[0].in_port(1).data.get_value()))))
            for permute in list_of_permutes:
                order = permute.in_port(1).data.get_value()
                if order is None:
                    raise Error(
                        "Transpose node {} has wrong order for permute = None".
                        format(permute.name))
                final_permutation = final_permutation[int64_array(order)]

            if np.array_equal(final_permutation,
                              list(range(len(list_of_permutes[0].in_port(1).data.get_value())))):
                first_data_node, last_data_node = list_of_permutes[0].in_node(), list_of_permutes[-1].out_node()
                graph.remove_edge(first_data_node.id, list_of_permutes[0].id)
            else:
                if len(list_of_permutes) < 2:
                    continue
                first_data_node, last_data_node = list_of_permutes[0].out_node(), list_of_permutes[-1].out_node()
                list_of_permutes[0].in_port(1).data.set_value(final_permutation)
                graph.remove_edge(first_data_node.id,
                                  first_data_node.out_node().id)

            graph.remove_edge(last_data_node.in_node().id, last_data_node.id)

            merge_data_nodes(graph, first_data_node, last_data_node)
            graph.remove_node(last_data_node.id)
            graph.clean_up()
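# Worked sketch of the order composition above (the orders are hypothetical): two chained Transpose
# nodes with order [0, 2, 1] compose to the identity permutation, so the whole sequence is removed.
import numpy as np

final_permutation = np.arange(3)
for order in [np.array([0, 2, 1]), np.array([0, 2, 1])]:
    final_permutation = final_permutation[order]
assert np.array_equal(final_permutation, [0, 1, 2])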