Example #1
    def find_and_replace_pattern(self, graph: Graph):
        argv = graph.graph['cmd_params']

        packed_user_shapes, packed_outputs, freeze_placeholder = user_data_repack(
            graph, argv.placeholder_shapes, argv.placeholder_data_types,
            argv.output, argv.freeze_placeholder_with_value)

        # save packed user shapes in arguments since node names and their ports
        # will be required to compose placeholder names with custom types
        # for MOCLegacyTransformations
        argv.packed_user_shapes = packed_user_shapes

        graph.graph['user_shapes'] = packed_user_shapes
        graph.graph['packed_outputs'] = packed_outputs
        graph.graph['freeze_placeholder'] = freeze_placeholder

        if isinstance(argv.inputs_list, list) and len(argv.inputs_list) > 0:
            graph.inputs_order = argv.inputs_list
        if isinstance(argv.output, list) and len(argv.output) > 0:
            graph.outputs_order = argv.output

        # save user-defined inputs for other extensions
        inputs = list(packed_user_shapes.keys()) if isinstance(packed_user_shapes, dict) else None
        graph.graph['inputs'] = inputs
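
The structures packed above are handed off to later transformations through the graph.graph dictionary. Below is a minimal sketch of that hand-off, assuming packed_user_shapes is a plain dict keyed by node name; the per-entry layout shown is a hypothetical illustration, not the actual MO contract:

# graph_attrs stands in for graph.graph; the entry layout below is an
# illustrative assumption.
graph_attrs = {}
packed_user_shapes = {
    'input_1': [{'shape': [1, 3, 224, 224], 'in': 0}],  # hypothetical entry
}
graph_attrs['user_shapes'] = packed_user_shapes

# Recomputes the 'inputs' attribute exactly as at the end of find_and_replace_pattern.
inputs = list(packed_user_shapes.keys()) if isinstance(packed_user_shapes, dict) else None
assert inputs == ['input_1']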
Example #2
    def load(self, graph: Graph):
        argv = graph.graph['cmd_params']
        if argv.tensorflow_custom_layer_libraries:
            libraries = argv.tensorflow_custom_layer_libraries.split(',')
            for library in libraries:
                log.info('Loading library "{}" with custom operations'.format(
                    library))
                tf_v1.load_op_library(library)

        graph_def, variables_values, framework, inputs_outputs_order = load_tf_graph_def(
            graph_file_name=argv.input_model,
            is_binary=not argv.input_model_is_text,
            checkpoint=argv.input_checkpoint,
            user_output_node_names_list=argv.output,
            model_dir=argv.saved_model_dir,
            meta_graph_file=argv.input_meta_graph,
            saved_model_tags=argv.saved_model_tags)

        if isinstance(inputs_outputs_order, tuple):
            graph.inputs_order = inputs_outputs_order[0]
            graph.outputs_order = inputs_outputs_order[1]

        send_framework_info(framework)

        try:
            tf_v1.import_graph_def(graph_def, name='')
        except Exception:
            log.warning(
                "TensorFlow post-processing of loaded model was unsuccessful. "
                "This is an optional step that Model Optimizer performs for any input model but it is not usually "
                "required for all models. "
                "It likely means that the original model is ill-formed. "
                "Model Optimizer will continue converting this model.")

        log.debug("Number of nodes in graph_def: {}".format(len(
            graph_def.node)))  # pylint: disable=no-member

        if argv.tensorboard_logdir:
            tensorboard_util.dump_for_tensorboard(graph_def,
                                                  argv.tensorboard_logdir)

        update_extractors_with_extensions(tf_op_extractors)

        try:
            protobuf2nx(graph, graph_def)
        except Exception as e:
            raise Error(
                'Cannot pre-process TensorFlow graph after reading from model file "{}". '
                'File is corrupt or has unsupported format. Details: {}. ' +
                refer_to_faq_msg(44),
                argv.model_name,
                str(e)
            ) from e

        graph.name = argv.model_name
        # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
        # and convert_nhwc_to_nchw(graph)
        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
        graph.graph['fw'] = 'tf'

        graph.graph['variables_values'] = variables_values
        del variables_values

        used_tensors = restore_edges(graph, get_tf_edges)

        # Tensor name information corresponding to a node is stored on its outgoing edges.
        # As output nodes do not have outgoing edges, fake outputs are required. In the code below,
        # an Identity node is added for each output, and the tensor name for the output is kept
        # on the (output, fake output) edge. After the Result-adding transformation, the fake outputs
        # are deleted from the graph.
        add_outputs_identity(
            graph, graph.nodes - used_tensors,
            lambda g, output, fake_node_name: g.add_edges_from(
                [create_tf_edge(output, fake_node_name, 0)]))

        remove_control_dependency_inputs(graph)

        graph.check_empty_graph(
            'protobuf2nx. It may happen due to problems with loaded model')
        extract_node_attrs(
            graph, lambda node: tf_op_extractor(
                node, check_for_duplicates(tf_op_extractors)))

        # try to detect layout from the nodes of the graph. If there are no convolution nodes in N(D)HWC layout then we
        # consider that the graph is in NCHW layout and no layout conversion should be performed
        if not argv.disable_nhwc_to_nchw and not graph_or_sub_graph_has_nhwc_ops(
                graph):
            if not argv.silent:
                log.debug('disable_nhwc_to_nchw" was automatically enabled.')
            for_graph_and_each_sub_graph_recursively(
                graph, update_cmd_params_and_layout)

        send_op_names_info(framework, graph)
        send_shapes_info(framework, graph)
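
For reference, a minimal sketch of the bare TensorFlow calls this loader builds on, with a hypothetical file path; tf_v1 is assumed to be tensorflow.compat.v1, as in the imports this example relies on:

import tensorflow.compat.v1 as tf_v1

# Load a frozen binary GraphDef, matching the is_binary=True path above.
graph_def = tf_v1.GraphDef()
with tf_v1.gfile.GFile('frozen_model.pb', 'rb') as f:  # hypothetical path
    graph_def.ParseFromString(f.read())

# The optional post-processing step from the loader: importing the GraphDef
# validates it against the ops known to the installed TensorFlow.
tf_v1.import_graph_def(graph_def, name='')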
Example #3
def copy_graph_with_ops(graph: Graph) -> Graph:
    """
    Function to copy graph and apply extenders to appropriate nodes
    :param graph: Graph to copy
    :return: Copied graph with applied extenders
    """
    new_graph = Graph()
    new_graph.stage = 'back'
    new_graph.graph = graph.graph
    new_graph.inputs_order = graph.inputs_order
    new_graph.outputs_order = graph.outputs_order

    node_connections = dict()
    mapping_of_old_idx_into_new = dict()

    restore_correct_ports(graph)

    # Nodes preprocessing stage in the source graph.
    # First propagate values only for Const nodes, because the other preprocessing
    # steps assume that Const nodes are already preprocessed.
    for op in graph.get_op_nodes(type='Const'):
        preprocessing_op_nodes[op.type](op)

    for op in graph.get_op_nodes():
        if op.soft_get('type') != 'Const' and op.soft_get(
                'type') in preprocessing_op_nodes:
            preprocessing_op_nodes[op.type](op)

    # Create a new copy of graph with correct attributes (shape & type infer, backend attrs etc.)
    for op in graph.get_op_nodes():

        # Save input shapes restored from IR
        op['old_input_shapes'] = list()
        for n in op.in_nodes():
            op.old_input_shapes.append(int64_array(op.in_node(n).shape))

        # Apply extenders to nodes in source graph
        if op.type in Extender.registered_ops:
            Extender.get_extender_class_by_name(op.type).extend(op)
        else:
            log.debug('No extender found for node {} with type={}.'.format(
                op.name, op.type))

        # Add node with necessary type and extended attrs in new graph
        op_type = op.soft_get('type_to_create', op.type)

        if op_type in custom_ops:
            node = custom_ops[op_type](new_graph, op.attrs()).create_node()
        else:
            if op_type not in Op.registered_ops:
                log.warning(
                    'Operation {} was not found among MO operations, please check it! '
                    'A simple shape inference function will be used.'.format(op_type))
                node = Op(new_graph, op.attrs()).create_node()
                assert 'type' in node, 'Operation {} has no `type` attribute.'.format(
                    node.soft_get('name'))
                node['op'] = node.type
                node['infer'] = Extender.use_shapes_from_ir
                if 'ir_data_attrs' in op:
                    node['IE'] = [('layer', [
                        ('id', lambda node: node.node), 'name', 'type',
                        'version'
                    ], [('data', list(op.ir_data_attrs.keys()), []), '@ports',
                        '@consts'])]

            else:
                node = Op.get_op_class_by_name(op_type)(
                    new_graph, op.attrs()).create_node()

        # Fill out_ports_count attribute
        if 'out_ports_count' not in node and node.soft_get('type') != 'Result':
            node['out_ports_count'] = len(op.out_edges())

        # This attribute is no longer needed and we can delete it
        if 'ir_data_attrs' in node:
            del node['ir_data_attrs']

        if op.has_and_set('need_copy_input_blobs'):
            copy_input_blobs(op, node)

        # Collect node connections
        mapping_of_old_idx_into_new[op.id] = node.id
        node_connections[op.id] = collect_node_outputs(op)

    # Restore connections in new graph
    for input_node_idx, its_outputs in list(node_connections.items()):
        for out_port_idx, out_port_dest in its_outputs.items():
            for dest_in_port_idx, dest_node_idx in out_port_dest:
                src = Node(new_graph,
                           mapping_of_old_idx_into_new[input_node_idx])
                dst = Node(new_graph,
                           mapping_of_old_idx_into_new[dest_node_idx])
                src.out_port(out_port_idx).connect(
                    dst.in_port(dest_in_port_idx))

    # Nodes postprocessing stage in new graph
    for op in new_graph.get_op_nodes():
        # Call normalize_outputs for restored operations to connect temporary Result operations to disconnected
        # output ports; this is required for correct shape inference. These Result operations are removed during
        # IR emitting. TopK outputs are normalized separately with the dedicated
        # TopKNormalizer.normalize_outputs function.
        if op.soft_get('type') != 'TopK':
            Op.normalize_outputs(op)

        # Set correct_data_type attribute to Const data nodes to correct processing of restored values
        if op.soft_get('type') == 'Const':
            assert len(op.out_nodes()) == 1 and op.out_node(0).soft_get('kind') == 'data',\
                'Const node {} is not properly connected to an appropriate data node'.format(op.soft_get('name'))
            op.out_node(0)['correct_data_type'] = True

            if op.has_and_set('rt_info'):
                op.out_node(0)['rt_info'] = op.rt_info

        # operations postprocessing with some special types
        if op.soft_get('type') in postprocessing_op_nodes:
            postprocessing_op_nodes[op.type](op)

        restore_tensor_names(op)

    # clean up the graph before shape inference
    new_graph.clean_up()

    return new_graph
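
A hypothetical Extender subclass, inferred from the Extender.registered_ops / extend calls above; the import path and registration mechanics are assumptions about the MO IR-reader API rather than something shown in this example:

# The import path below is an assumption.
from mo.utils.ir_reader.extender import Extender


class StridedOpExtender(Extender):
    op = 'StridedOp'  # hypothetical operation type this extender handles

    @staticmethod
    def extend(op):
        # Restore attributes that the IR serializes as strings, e.g. turn a
        # comma-separated list back into ints before shape inference runs.
        if op.has_valid('strides') and isinstance(op.strides, str):
            op['strides'] = [int(v) for v in op.strides.split(',')]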
Example #4
def protobuf2nx(graph: Graph, pb):
    """
    Convert a proto message with an ONNX model to the equivalent NX representation. All nodes and edges are restored
    here because the ONNX model has an op/data representation, which means that nodes are connected via tensor names.
    Names of tensors are defined on demand in nodes, so the code here is similar to the Caffe loader.

    :param graph: the Graph object to load the graph into
    :param pb: the ONNX file protobuf message
    :return: None
    """
    # maps a tensor name to the node that produced it and the node port: str -> (node_id, node_port)
    data_nodes_map = {}

    graph_pb = pb.graph
    add_initializers_and_inputs_to_graph(graph, graph_pb, data_nodes_map)

    # Preserve inputs order
    graph.inputs_order = [str(inp.name) for inp in graph_pb.input]

    output_ids = []
    for outp in graph_pb.output:
        name = str(outp.name)
        if graph.has_node(name):
            log.error('Name {} of an output node already exists in the graph. Ignoring this output. If the output is'
                      ' required, please rename it.'.format(name), extra={'is_warning': True})
            continue
        # add a fake node for the output
        graph.add_node(name, kind='op', op='FakeOutput', pb=outp)
        output_ids.append(name)

    # Preserve outputs order
    graph.outputs_order = output_ids

    # Go through all nodes in the original model order (because data nodes are defined on-the-fly and order is
    # important)
    for node in graph_pb.node:
        # create an NX node
        fw_name = node_id(node)
        id = graph.unique_id(fw_name)
        graph.add_node(id, pb=node, kind='op')
        if hasattr(graph, 'op_names_statistic') and hasattr(node, 'op_type'):
            graph.op_names_statistic[node.op_type] += 1

        # add incoming edges based on data_nodes_map
        for dst_port, inp in enumerate(node.input):
            # should add edge inp --> id
            if inp not in data_nodes_map:
                if inp == '':
                    # input is omitted; most likely it corresponds to an optional input for an operator
                    continue
                else:
                    raise Error(
                        'Reference to "{}" is not satisfied. A node refers to a non-existing data tensor. The ONNX '
                        'model is not consistent. Protobuf fragment: {}', inp, node)
            src_id, src_port = data_nodes_map[inp]

            assert graph.has_node(src_id)
            edge_attrs = {
                'out': src_port,
                'in': dst_port,
                'name': inp,
                'fw_tensor_debug_info': [(src_id, inp)],
                'in_attrs': ['in', 'name'],
                'out_attrs': ['out', 'name'],
                'data_attrs': ['fw_tensor_debug_info']
            }
            graph.add_edge(src_id, id, **edge_attrs)

        # add outgoing edges to data_nodes_map
        for src_port, out in enumerate(node.output):
            if out in output_ids:
                edge_attrs = {
                    'out': src_port,
                    'in': 0,
                    'name': out,
                    'fw_tensor_debug_info': [(fw_name, out)],
                    'in_attrs': ['in', 'name'],
                    'out_attrs': ['out', 'name'],
                    'data_attrs': ['fw_tensor_debug_info']
                }
                graph.add_edge(id, out, **edge_attrs)
            if out in data_nodes_map:
                log.debug("Detected reuse of blob {}.".format(out))
            data_nodes_map[out] = (id, src_port)

    graph.graph['tensor_mapping'] = data_nodes_map  # save main graph tensor names mapping for Loop op parsing
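
A minimal usage sketch, assuming pb comes from onnx.load(); the file path and the default Graph() construction are assumptions, and the downstream extraction steps are elided:

import onnx

model_proto = onnx.load('model.onnx')  # hypothetical path; yields a ModelProto
graph = Graph()                        # Graph() construction as in Example #3
protobuf2nx(graph, model_proto)        # pb.graph is the GraphProto walked above

# tensor name -> (producing node id, output port), as saved on the last line
print(graph.graph['tensor_mapping'])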