Example #1
def compress_weights(model: Graph):
    """Apply transformations to save model weights to INT8."""
    add_removed_converts(model)
    CompressQuantizeWeights().find_and_replace_pattern(model)
    model.clean_up()
    ForceStrictPrecision().find_and_replace_pattern(model)
    model.clean_up()
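
A minimal sketch of the pass/clean-up pipeline shape used above; DummyGraph and RemoveIdentityOps are hypothetical stand-ins, not OpenVINO MO classes:

class DummyGraph:
    def clean_up(self):
        print('clean_up: dead nodes removed, constants folded')

class RemoveIdentityOps:
    def find_and_replace_pattern(self, graph):
        print('pattern pass applied')

def run_pipeline(graph):
    # each transformation exposes find_and_replace_pattern(graph);
    # the graph is cleaned up between passes to stay consistent
    for transform in (RemoveIdentityOps(),):
        transform.find_and_replace_pattern(graph)
        graph.clean_up()

run_pipeline(DummyGraph())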
Example #2
def build_graph(graph_attrs, meta_data, nodes, edges):
    """ Build the Graph with specific nodes and edges.
     :param graph_attrs: dictionary with graph attributes
     :param nodes: list of nodes where each node is tuple (node_name, type, attrs)
                  nodes=[
                      ('input', 'Parameter',  {}),
                      ('weights', 'Const', {}),
                      ('conv', 'Convolution', {}),
                      ('output', 'Result', {})
                  ]
     :param edges: list of edges where each edge is tuple (node_out, node_in, attrs)
                  edges=[
                      ('input', 'conv', {'out': 0, 'in': 0}),
                      ('weights', 'conv', {'out': 0, 'in': 1}),
                      ('conv', 'output', {'out': 0, 'in': 0})
                  ]
     :return: generated graph.
    """
    graph = Graph()
    graph.graph = graph_attrs
    graph.meta_data = meta_data

    for name, op_type, attrs in nodes:
        create_node(graph, name, op_type, attrs)

    for src, dst, edge_attrs in edges:
        out_port = edge_attrs.get('out', 0)
        in_port = edge_attrs.get('in', 0)
        connect_nodes_by_name(graph, src, out_port, dst, in_port)

    graph.clean_up()

    return graph
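
A hedged usage sketch, reusing the node and edge lists from the docstring above (the graph_attrs value is illustrative, and the create_node / connect_nodes_by_name helpers are assumed to be available):

graph = build_graph(
    graph_attrs={'name': 'test_graph'},
    meta_data={},
    nodes=[
        ('input', 'Parameter', {}),
        ('weights', 'Const', {}),
        ('conv', 'Convolution', {}),
        ('output', 'Result', {}),
    ],
    edges=[
        ('input', 'conv', {'out': 0, 'in': 0}),
        ('weights', 'conv', {'out': 0, 'in': 1}),
        ('conv', 'output', {'out': 0, 'in': 0}),
    ],
)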
Example #3
def grouped_convolutions_fusing(graph: Graph):
    while True:
        is_fused = False
        graph.clean_up()
        for node in graph.pseudo_topological_sort():
            if node.kind == 'op' and len(node.out_nodes()) > 1:
                if node.soft_get('can_be_fused') is False:
                    continue

                is_valid_convolutions = True
                last_layer = None

                next_nodes = get_next_operation(node)
                # Check that all operations after this one are Convolutions
                # and that all convolutions have the same output
                if len(next_nodes) > 1 and all(_node.soft_get('type') in ['Convolution', 'Deconvolution'] for _node in next_nodes):
                    for conv in next_nodes:
                        conv_outputs = get_next_operation(conv)
                        if conv.soft_get('can_be_fused') is False:
                            is_valid_convolutions = False
                        if len(conv_outputs) != 1:
                            is_valid_convolutions = False
                        if last_layer is None:
                            last_layer = conv_outputs[0].id
                        # TODO: this check is not working for V10 where Biases appears as separate operations
                        elif conv_outputs[0].id != last_layer:
                            is_valid_convolutions = False

                    if is_valid_convolutions:
                        is_fused = concat_convolutions(graph, node, Node(graph, last_layer))
                        if is_fused:
                            break

        if not is_fused:
            break
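
The outer while True implements a fixed-point iteration: fuse one group, restart the traversal (clean_up keeps the graph consistent), and stop once a full pass changes nothing. A generic sketch of that pattern, with rewrite_once as a hypothetical single-step rewrite that returns True when it changed something:

def run_to_fixed_point(graph, rewrite_once):
    # apply a single-step rewrite until a full pass changes nothing
    while True:
        if not rewrite_once(graph):
            break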
Example #4
    def find_and_replace_pattern(self, graph: Graph):
        for permute_node in graph.get_op_nodes(type='Transpose'):
            # the node may already have been removed by clean_up() on a previous iteration
            if permute_node.id not in graph.nodes():
                continue

            list_of_permutes = [permute_node]
            # Get sequence of permutations
            node = permute_node
            while True:
                next_ops = get_next_operation(node)
                if len(next_ops) != 1:
                    break

                next_op = next_ops[0]
                if next_op.soft_get('type') == 'Transpose':
                    list_of_permutes.append(next_op)
                    node = next_op
                else:
                    break

            order_len = len(list_of_permutes[0].in_port(1).data.get_value())
            final_permutation = int64_array(list(range(order_len)))
            for permute in list_of_permutes:
                order = permute.in_port(1).data.get_value()
                if order is None:
                    raise Error('Transpose node {} has no permutation order '
                                '(order is None)'.format(permute.name))
                # compose permutations: index the accumulated order with the next one
                final_permutation = final_permutation[int64_array(order)]

            if np.array_equal(final_permutation, list(range(order_len))):
                # the composed permutation is the identity: remove the whole chain
                first_data_node = list_of_permutes[0].in_node()
                last_data_node = list_of_permutes[-1].out_node()
                graph.remove_edge(first_data_node.id, list_of_permutes[0].id)
            else:
                if len(list_of_permutes) < 2:
                    continue
                # fold the chain into a single Transpose with the composed order
                first_data_node = list_of_permutes[0].out_node()
                last_data_node = list_of_permutes[-1].out_node()
                list_of_permutes[0].in_port(1).data.set_value(final_permutation)
                graph.remove_edge(first_data_node.id, first_data_node.out_node().id)

            graph.remove_edge(last_data_node.in_node().id, last_data_node.id)

            merge_data_nodes(graph, first_data_node, last_data_node)
            graph.remove_node(last_data_node.id)
            graph.clean_up()
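
The key step above composes permutation orders by indexing: final_permutation = final_permutation[order]. A self-contained numpy check of that rule, showing that a Transpose followed by its inverse composes to the identity, which is exactly the case where the whole chain is removed:

import numpy as np

identity = np.arange(4)
order_a = np.array([0, 2, 3, 1])   # first Transpose order
order_b = np.array([0, 3, 1, 2])   # second Transpose order (inverse of the first)

composed = identity[order_a][order_b]  # same composition rule as in the pass
assert np.array_equal(composed, identity)  # the chain cancels, so it can be dropped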
Example #5
def remove_converts(graph: Graph):
    for op in graph.get_op_nodes(type='Convert'):
        source_op = op.in_port(0).get_source().node
        if source_op.type == 'Const' and source_op.data_type == np.float16:
            # mark the data node after the Convert operation so that the
            # Convert can be restored later
            op.out_node(0)['Insert_Convert_operation_after'] = True
            # mark the Const and Convert operations so that they will be folded
            source_op['need_shape_inference'] = True
            op['stop_value_propagation'] = False
            op['need_shape_inference'] = True
    graph.clean_up()
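
In plain numpy terms, the fold that this marking enables collapses an FP16 Const followed by a Convert-to-FP32 into a single FP32 Const; the Insert_Convert_operation_after marker lets the Convert be re-inserted around the compressed weights later:

import numpy as np

w_fp16 = np.array([0.1, 0.2, 0.3], dtype=np.float16)  # value of the Const node
w_folded = w_fp16.astype(np.float32)                   # Convert folded into the Const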
Example #6
    def find_and_replace_pattern(self, graph: Graph):
        cleanup_called_once = False

        # walk through all Loop nodes and find Const inputs
        for loop_node in graph.get_op_nodes(op='Loop'):
            # call clean-up, which performs constant folding, only once
            if not cleanup_called_once:
                graph.clean_up()
                cleanup_called_once = True

            # move constant nodes into the body graph and remove the corresponding body Parameter nodes
            Loop.pull_constant_inputs_into_body(loop_node)

            # since some input ports can be removed after pulling in constants, the Loop node must be normalized
            Loop.normalize_input_output_ports(loop_node)

            # perform shape inference for the Loop node again, since new constants may have appeared
            # and constant folding can help the weights path to a Convolution node inside the body graph
            loop_node['need_shape_inference'] = True
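
A conceptual Python analogue of why pulling constants into the body helps (the functions are purely illustrative): while a value arrives through a body Parameter, nothing inside the body can be folded; once it becomes a body-local constant, expressions over it fold ahead of time.

K = 3  # value known when the graph is built

def body_before(x, k):   # k enters through a body Parameter each iteration
    return x * (k + 1)   # (k + 1) cannot be constant-folded

def body_after(x):       # the constant has been pulled into the body...
    return x * 4         # ...so (K + 1) folds to 4 at graph-build time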
Example #7
def copy_graph_with_ops(graph: Graph) -> Graph:
    """
    Function to copy graph and apply extenders to appropriate nodes
    :param graph: Graph to copy
    :return:Copied graph with applied extenders
    """
    new_graph = Graph()
    new_graph.stage = 'back'
    new_graph.graph = graph.graph

    node_connections = dict()
    mapping_of_old_idx_into_new = dict()

    restore_correct_ports(graph)

    # Node preprocessing stage in the source graph.
    # First propagate values only for Const nodes, because the other
    # preprocessing steps assume Const nodes are already preprocessed.
    for op in graph.get_op_nodes(type='Const'):
        preprocessing_op_nodes[op.type](op)

    for op in graph.get_op_nodes():
        if op.soft_get('type') != 'Const' and op.soft_get('type') in preprocessing_op_nodes:
            preprocessing_op_nodes[op.type](op)

    # Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
    for op in graph.get_op_nodes():

        # Save input shapes restored from IR
        op['old_input_shapes'] = list()
        for n in op.in_nodes():
            op.old_input_shapes.append(int64_array(op.in_node(n).shape))

        # Apply extenders to nodes in source graph
        if op.type in Extender.registered_ops:
            Extender.get_extender_class_by_name(op.type).extend(op)
        else:
            log.debug('Extender for node {} with type={} not found'.format(
                op.name, op.type))

        # Add node with necessary type and extended attrs in new graph
        op_type = op.soft_get('type_to_create', op.type)

        if op_type in custom_ops:
            node = custom_ops[op_type](new_graph, op.attrs()).create_node()
        else:
            if op_type not in Op.registered_ops:
                log.warning(
                    'Operation {} is not found in MO operations, please check it! '
                    'A simple shape inference function will be used.'.format(op_type))
                node = Op(new_graph, op.attrs()).create_node()
                assert 'type' in node, 'Operation {} has no `type` attribute.'.format(
                    node.soft_get('name'))
                node['op'] = node.type
                node['infer'] = Extender.use_shapes_from_ir
                if 'ir_data_attrs' in op:
                    node['IE'] = [('layer', [
                        ('id', lambda node: node.node), 'name', 'type',
                        'version'
                    ], [('data', list(op.ir_data_attrs.keys()), []), '@ports',
                        '@consts'])]

            else:
                node = Op.get_op_class_by_name(op_type)(
                    new_graph, op.attrs()).create_node()

            # Fill out_ports_count attribute
            if 'out_ports_count' not in node and node.soft_get('type') != 'Result':
                node['out_ports_count'] = len(op.out_edges())

        # This attribute is no longer needed and we can delete it
        if 'ir_data_attrs' in node:
            del node['ir_data_attrs']

        if op.has_and_set('need_copy_input_blobs'):
            copy_input_blobs(op, node)

        # Collect node connections
        mapping_of_old_idx_into_new[op.id] = node.id
        node_connections[op.id] = collect_node_outputs(op)

    # Restore connections in new graph
    for input_node_idx, its_outputs in list(node_connections.items()):
        for out_port_idx, out_port_dest in its_outputs.items():
            for dest_in_port_idx, dest_node_idx in out_port_dest:
                src = Node(new_graph,
                           mapping_of_old_idx_into_new[input_node_idx])
                dst = Node(new_graph,
                           mapping_of_old_idx_into_new[dest_node_idx])
                src.out_port(out_port_idx).connect(
                    dst.in_port(dest_in_port_idx))

    # Nodes postprocessing stage in new graph
    for op in new_graph.get_op_nodes():
        # Normalize node outputs for restored operations to connect temporary Result
        # operations to disconnected output ports; this is required for correct shape
        # inference. These Result operations are removed during IR emitting. TopK
        # outputs are normalized separately by TopKNormalizer.normalize_outputs.
        if op.soft_get('type') != 'TopK':
            Op.normalize_outputs(op)

        # Set the correct_data_type attribute on Const data nodes for correct processing of restored values
        if op.soft_get('type') == 'Const':
            assert len(op.out_nodes()) == 1 and op.out_node(0).soft_get('kind') == 'data',\
                'Const node {} is not properly connected to a data node'.format(op.soft_get('name'))
            op.out_node(0)['correct_data_type'] = True

            if op.has_and_set('rt_info'):
                op.out_node(0)['rt_info'] = op.rt_info

        # operations postprocessing with some special types
        if op.soft_get('type') in postprocessing_op_nodes:
            postprocessing_op_nodes[op.type](op)

        restore_tensor_names(op)

    # clean up the graph and run shape inference
    new_graph.clean_up()

    return new_graph
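
The connection-restore loop above walks a nested mapping of the form {node_id: {out_port: [(in_port, dest_id), ...]}}. A self-contained sketch of that bookkeeping with plain dicts (node names are illustrative):

node_connections = {
    'conv': {0: [(0, 'relu')]},     # conv out-port 0 -> relu in-port 0
    'relu': {0: [(0, 'result')]},   # relu out-port 0 -> result in-port 0
}

for src_id, outputs in node_connections.items():
    for out_port, destinations in outputs.items():
        for in_port, dst_id in destinations:
            print(f'{src_id}:{out_port} -> {dst_id}:{in_port}')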