Example #1
def shape_of_sub_graph_reinference(graph: Graph):
    """
    After layout permutation (shape change in data nodes), shape sub-graphs contain values in the old layout.
    To fix that, we re-run full partial inference on the ShapeOf sub-graphs.
    """
    shape_ops = graph.get_op_nodes(op='ShapeOf')
    for shape in shape_ops:
        shape.infer(shape)
    LayoutChangeForConstantShapePaths().find_shape_subgraph_endpoints(
        [shape.out_port(0) for shape in shape_ops], None,
        lambda in_port: in_port.node.infer(in_port.node))


def get_ports_and_nodes_on_shape_subgraphs(graph):
    # Output ports of ShapeOf nodes are the sources of all shape sub-graphs
    shape_sources = {
        shape_of.out_port(0)
        for shape_of in graph.get_op_nodes(type='ShapeOf')
    }
    end_points = LayoutChangeForConstantShapePaths().find_shape_subgraph_endpoints(
        [shape.out_port(0) for shape in graph.get_op_nodes(type='ShapeOf')])
    ports, nodes = MarkSubGraphsWithCorrectLayout.walk_up_from_in_ports_to_out_ports(
        end_points, shape_sources)
    return ports, nodes
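
A minimal sketch of how these two helpers might be driven after a layout change, assuming `graph` is an already-loaded Model Optimizer Graph; the marking step and the 'nchw_layout' attribute below are illustrative assumptions, not part of this example:

    shape_of_sub_graph_reinference(graph)  # recompute values inside shape sub-graphs
    ports, nodes = get_ports_and_nodes_on_shape_subgraphs(graph)
    for node in nodes:
        node['nchw_layout'] = True  # hypothetical: exempt shape sub-graph nodes from further permutation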
Example #3
def shape_of_sub_graph_reinference(graph: Graph):
    """
    After layout permutation (shape change in data nodes), shape sub-graphs contain values in the old layout.
    To fix that, we re-run full partial inference on the ShapeOf sub-graphs.
    """
    shape_ops = graph.get_op_nodes(op='ShapeOf')
    for shape in shape_ops:
        shape.infer(shape)

    def reinfer_once(in_port: Port):
        # Re-infer each node at most once, guarded by the 'reinferred' flag
        node = in_port.node
        if not node.soft_get('reinferred', False):
            node.infer(node)
            node['reinferred'] = True

    LayoutChangeForConstantShapePaths().find_shape_subgraph_endpoints(
        out_ports=[shape.out_port(0) for shape in shape_ops], action=reinfer_once)
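
Compared to Example #1, this variant passes the named `reinfer_once` callback instead of a bare lambda: a node reachable from several ShapeOf outputs is re-inferred at most once, because the callback marks every visited node with a `reinferred` flag (read back via `soft_get`). This keeps the pass linear in the size of the shape sub-graphs even when they overlap.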
Example #4
def tf2nx(argv: argparse.Namespace, model_file_name: str, output_model_name: str, output_dir: str,
          is_binary: bool):
    """
    Convert TF GraphDef object to NetworkX representation.
    The resulting graph is still TF-specific and needs normalization passes to be applied.
    The TF-specific structure assumes each GraphDef node is converted to a single
    NetworkX node, the node id is the original TF node name, and edges go directly from one op to another op.
    """
    meta_info = get_meta_info(argv)

    if argv.tensorflow_custom_layer_libraries:
        libraries = argv.tensorflow_custom_layer_libraries.split(',')
        for library in libraries:
            log.info('Loading library "{}" with custom operations'.format(library))
            tf.load_op_library(library)

    graph_def, variables_values = load_tf_graph_def(graph_file_name=model_file_name, is_binary=is_binary,
                                                    checkpoint=argv.input_checkpoint,
                                                    user_output_node_names_list=argv.output,
                                                    model_dir=argv.saved_model_dir,
                                                    meta_graph_file=argv.input_meta_graph,
                                                    saved_model_tags=argv.saved_model_tags)

    try:
        tf.import_graph_def(graph_def, name='')
    except Exception:
        log.warning("TensorFlow post-processing of the loaded model was unsuccessful. "
                    "This is an optional step that Model Optimizer performs for any input model, "
                    "and it is not required for all models. "
                    "It likely means that the original model is ill-formed. "
                    "Model Optimizer will continue converting this model.")

    log.debug("Number of nodes in graph_def: {}".format(len(graph_def.node)))  # pylint: disable=no-member

    if argv.tensorboard_logdir:
        tensorboard.dump_for_tensorboard(graph_def, argv.tensorboard_logdir)

    update_extractors_with_extensions(tf_op_extractors)

    try:
        graph = protobuf2nx(graph_def)
        graph.__setattr__('name', output_model_name)
        # 'layout' parameter change may cause an issue in EltwiseInputReshape replacer
        # and convert_nhwc_to_nchw(graph)
        graph.graph['layout'] = 'NCHW' if argv.disable_nhwc_to_nchw else 'NHWC'
        graph.graph['cmd_params'] = argv
        graph.graph['fw'] = 'tf'
        graph.graph['ir_version'] = 2 if argv.generate_deprecated_IR_V2 else 5

        graph.graph['variables_values'] = variables_values
        del variables_values

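        # Turn TF input references into real graph edges and drop control-dependency ('^node') inputs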
        graph = restore_edges(graph, get_tf_edges)
        graph = remove_control_dependency_inputs(graph)
    except Exception as e:
        raise Error(
            'Cannot pre-process TensorFlow graph after reading from model file "{}". '
            'File is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44),
            model_file_name,
            str(e)
        ) from e

    graph.check_empty_graph('protobuf2nx. It may happen due to problems with loaded model')
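    # Fill in node attributes using the registered TF operation extractors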
    extract_node_attrs(graph, lambda node: tf_op_extractor(node, check_for_duplicates(tf_op_extractors)))

    # --------------------------------- LOAD END ------------------------------------------------------
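    # Run all registered front and middle replacement passes over the graph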
    class_registration.apply_replacements(graph, class_registration.ClassType.FRONT_REPLACER)
    class_registration.apply_replacements(graph, class_registration.ClassType.MIDDLE_REPLACER)

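    # Fuse standalone Pad operations into the pads of the following convolution/pooling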
    fuse_pad(graph)
    graph_clean_up_tf(graph)

    convert_matmul_to_fully_connected(graph)

    # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
    for_graph_and_each_sub_graph_recursively(graph, lambda graph: mark_unfused_nodes(graph, argv.finegrain_fusing))

    # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence
    # IE doesn't support BN with 4 inputs, so we have to split it to two ScaleShift
    convert_batch_norm(graph)
    graph_clean_up_tf(graph)

    if not argv.disable_fusing:
        # Converting ScaleShift layer to Mul->Add
        for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        # Fusing the sequences of Mul/Add operations
        for_graph_and_each_sub_graph_recursively(graph, fuse_mul_add_sequence)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

        # Fusing linear operation to Convolution
        for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
        for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    if not argv.disable_gfusing:
        grouped_convolutions_fusing(graph)
        graph_clean_up_tf(graph)
        if not argv.disable_fusing:
            fuse_linear_ops(graph)
            graph_clean_up_tf(graph)

    # Converting Mul->Add to ScaleShift node
    for_graph_and_each_sub_graph_recursively(graph, convert_muladd_to_scaleshift_or_power)
    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    for_graph_and_each_sub_graph_recursively(graph, convert_mul_add_to_power)

    # Need to eliminate dead nodes before doing update_fully_connected_shapes
    # because update_fully_connected_shapes does partial inference and dead
    # nodes will lead to sporadic failures.
    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)
    for_graph_and_each_sub_graph_recursively(graph, update_fully_connected_shapes)

    for_graph_and_each_sub_graph_recursively(graph, convert_mul_eltwise_to_leaky_relu)
    graph_clean_up_tf(graph)
    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    for_graph_and_each_sub_graph_recursively(graph, convert_reshape)
    for_graph_and_each_sub_graph_recursively(graph, convert_squeeze)

    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    for_graph_and_each_sub_graph_recursively(graph, convert_add_or_mul_to_scaleshift)  # scale = 1
    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    if argv.reverse_input_channels:
        reverse_input_channels(graph)

    if argv.move_to_preprocess:
        move_scaleshift_to_preprocess(graph)
        graph_clean_up_tf(graph)

    fuse_sequence_of_reshapes(graph)

    pattern = EltwiseInputNormalize()
    pattern.find_and_replace_pattern(graph)

    conv_flatten_concat(graph)

    if argv.enable_concat_optimization:
        ConcatOptimization().find_and_replace_pattern(graph)

    LayoutChangeForConstantShapePaths().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

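    # Compute NHWC -> NCHW permutations and apply them to data and op node attributes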
    for_graph_and_each_sub_graph_recursively(graph, apply_nhwc_to_nchw_permutation)
    for_graph_and_each_sub_graph_recursively(graph, merge_nodes_permutations)
    for_graph_and_each_sub_graph_recursively(graph, permute_data_nodes_attrs)
    for_graph_and_each_sub_graph_recursively(graph, permute_op_nodes_attrs)

    for_graph_and_each_sub_graph_recursively(graph, repack_fully_connected_weights_nhwc_to_nchw)
    for_graph_and_each_sub_graph_recursively(graph, transpose_fully_connected_weights)

    for_graph_and_each_sub_graph_recursively(graph, graph_clean_up_tf)

    class_registration.apply_replacements(graph, class_registration.ClassType.BACK_REPLACER)

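    # Drop existing Const ops and re-create them uniformly from constant data nodes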
    for_graph_and_each_sub_graph_recursively(graph, remove_const_ops)
    CreateConstNodesReplacement().find_and_replace_pattern(graph)

    for_graph_and_each_sub_graph_recursively(graph, remove_output_ops)

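    # Serialize the normalized graph to IR (.xml + .bin) with the requested data type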
    prepare_emit_ir(graph=graph, data_type=argv.data_type, output_dir=output_dir, output_model_name=output_model_name,
                    meta_info=meta_info)

    return 0
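
A minimal driver sketch for this entry point, assuming `argv` is the argparse.Namespace built by Model Optimizer's TF CLI parser; the file names below are hypothetical:

    ret = tf2nx(argv,
                model_file_name='frozen_model.pb',  # hypothetical frozen GraphDef
                output_model_name='frozen_model',
                output_dir='.',
                is_binary=True)  # .pb files are binary protobufs
    assert ret == 0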