Code example #1
    def test_2(self):
        graph = build_graph(nodes,
                            edges, {
                                'mul': {
                                    'can_be_fused': True
                                },
                                'mul_const_data': {
                                    'shape': np.array([1]),
                                    'value': np.array([-1])
                                },
                                'quantize_data': {
                                    'shape': np.array([2, 3, 4, 4])
                                },
                                'mi_o_data': {
                                    'shape': np.array([1]),
                                    'value': np.array([0])
                                },
                                'ma_o_data': {
                                    'shape': np.array([1]),
                                    'value': np.array([1])
                                },
                            },
                            nodes_with_edges_only=True)
        graph.stage = 'middle'
        graph_ref = build_graph(nodes,
                                edges_ref, {
                                    'quantize_data': {
                                        'shape': np.array([2, 3, 4, 4])
                                    },
                                    'mul_const_data': {
                                        'shape': np.array([1]),
                                        'value': np.array([-1])
                                    },
                                    'mi_o_data': {
                                        'shape': np.array([1]),
                                        'value': np.array([1])
                                    },
                                    'ma_o_data': {
                                        'shape': np.array([1]),
                                        'value': np.array([0])
                                    },
                                    'mi_i_data': {
                                        'shape': np.array([1]),
                                        'value': np.array([10])
                                    },
                                    'ma_i_data': {
                                        'shape': np.array([1]),
                                        'value': np.array([-10])
                                    },
                                },
                                nodes_with_edges_only=True)

        MulFakeQuantizeFuse().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'output',
                                      check_op_attrs=True)

        self.assertTrue(flag, resp)
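
The tests above reference nodes, edges, and edges_ref fixtures that are defined elsewhere in the test module and not shown here. Below is a minimal, hypothetical sketch of what such fixtures could look like for a Mul -> FakeQuantize pattern; the node names and the op/data node convention are illustrative assumptions, not the actual fixtures from the repository.

import numpy as np

# Hypothetical fixtures (illustrative only): operation nodes interleaved with
# data nodes, mirroring the node names used in the tests above.
nodes = {
    'placeholder': {'kind': 'op', 'op': 'Parameter'},
    'placeholder_data': {'kind': 'data', 'shape': np.array([2, 3, 4, 4])},
    'mul_const': {'kind': 'op', 'op': 'Const'},
    'mul_const_data': {'kind': 'data'},
    'mul': {'kind': 'op', 'op': 'Mul', 'can_be_fused': True},
    'mul_data': {'kind': 'data'},
    'mi_i': {'kind': 'op', 'op': 'Const'},
    'mi_i_data': {'kind': 'data'},
    'ma_i': {'kind': 'op', 'op': 'Const'},
    'ma_i_data': {'kind': 'data'},
    'mi_o': {'kind': 'op', 'op': 'Const'},
    'mi_o_data': {'kind': 'data'},
    'ma_o': {'kind': 'op', 'op': 'Const'},
    'ma_o_data': {'kind': 'data'},
    'quantize': {'kind': 'op', 'op': 'FakeQuantize'},
    'quantize_data': {'kind': 'data'},
    'output': {'kind': 'op', 'op': 'Result'},
}

# Hypothetical edges: (producer, consumer) pairs. The Mul result feeds the data
# input of FakeQuantize; the Const nodes feed its four range inputs.
edges = [
    ('placeholder', 'placeholder_data'), ('placeholder_data', 'mul'),
    ('mul_const', 'mul_const_data'), ('mul_const_data', 'mul'),
    ('mul', 'mul_data'), ('mul_data', 'quantize'),
    ('mi_i', 'mi_i_data'), ('mi_i_data', 'quantize'),
    ('ma_i', 'ma_i_data'), ('ma_i_data', 'quantize'),
    ('mi_o', 'mi_o_data'), ('mi_o_data', 'quantize'),
    ('ma_o', 'ma_o_data'), ('ma_o_data', 'quantize'),
    ('quantize', 'quantize_data'), ('quantize_data', 'output'),
]

# edges_ref would describe the expected graph after fusion, with the Mul removed
# and its constant folded into the FakeQuantize range inputs.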
Code example #2
    def test_negative_5(self):
        graph = build_graph(
            nodes,
            edges, {
                'mul': {
                    'can_be_fused': True
                },
                'mul_const_data': {
                    'shape': np.array([3, 1, 1]),
                    'value': np.array([[[0]], [[1]], [[2]]])
                },
                'quantize_data': {
                    'shape': np.array([2, 3, 4, 4])
                },
                'mi_o_data': {
                    'shape': np.array([1, 1, 1, 1]),
                    'value': np.broadcast_to(np.array([0]), (1, 1, 1, 1))
                },
                'ma_o_data': {
                    'shape': np.array([1, 1, 1, 1]),
                    'value': np.broadcast_to(np.array([1]), (1, 1, 1, 1))
                },
            },
            nodes_with_edges_only=True)
        graph.stage = 'middle'
        graph_ref = graph.copy()

        MulFakeQuantizeFuse().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'output',
                                      check_op_attrs=True)

        self.assertTrue(flag, resp)
Code example #3
    def test_negative_1(self):
        graph = build_graph(nodes, edges, nodes_with_edges_only=True)
        graph.stage = 'middle'
        graph_ref = build_graph(nodes, edges, nodes_with_edges_only=True)

        MulFakeQuantizeFuse().find_and_replace_pattern(graph)
        (flag, resp) = compare_graphs(graph,
                                      graph_ref,
                                      'output',
                                      check_op_attrs=True)

        self.assertTrue(flag, resp)
Code example #4
    def find_and_replace_pattern(self, graph: Graph):
        fw = graph.graph['fw']
        argv = graph.graph['cmd_params']
        layout = graph.graph['layout']

        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
        for_graph_and_each_sub_graph_recursively(graph, lambda G: mark_unfused_nodes(G, argv.finegrain_fusing))

        # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence
        # IE doesn't support batchNormInference with 4 inputs, so we have to split it to two ScaleShift
        for_graph_and_each_sub_graph_recursively(graph, convert_batch_norm)

        if fw == 'caffe':
            # Converting ScaleShift layer to Mul->Add
            for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)

        for_graph_and_each_sub_graph_recursively(graph, Div().find_and_replace_pattern)
        for_graph_and_each_sub_graph_recursively(graph, Sub().find_and_replace_pattern)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if not argv.disable_fusing:
            if fw != 'caffe':
                # Converting ScaleShift layer to Mul->Add
                for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)
                for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

            # Fusing the sequences of Mul/Add operations
            for_graph_and_each_sub_graph_recursively(graph, fuse_mul_add_sequence)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

            normalize_eltwise_inputs(graph)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

            # Fusing linear operation to Convolution
            for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if not argv.disable_gfusing:
            for_graph_and_each_sub_graph_recursively(graph, grouped_convolutions_fusing)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())
            if not argv.disable_fusing:
                for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
                for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        for_graph_and_each_sub_graph_recursively(graph, normalize_eltwise_inputs)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph)
        FakeQuantizeFuse().find_and_replace_pattern(graph)
        AddFakeQuantizeFuse().find_and_replace_pattern(graph)
        MulFakeQuantizeFuse().find_and_replace_pattern(graph)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if layout != 'NHWC' and not argv.disable_resnet_optimization:
            stride_optimization(graph)
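
For reference, here is a minimal sketch of the inputs this pass expects; the attribute names come from the code above, while the concrete values are illustrative assumptions rather than real Model Optimizer defaults.

import argparse

# Illustrative subset of cmd_params read by the pass above.
cmd_params = argparse.Namespace(
    finegrain_fusing=None,              # consumed by mark_unfused_nodes
    disable_fusing=False,               # gates ScaleShift decomposition and Mul/Add/linear fusing
    disable_gfusing=False,              # gates grouped-convolution fusing
    disable_resnet_optimization=False,  # gates stride_optimization for non-NHWC layouts
)

# Graph-level attributes the pass reads from graph.graph (values are examples).
graph_attrs = {
    'fw': 'onnx',        # 'caffe' triggers the early ScaleShift -> Mul->Add conversion
    'layout': 'NCHW',    # anything other than 'NHWC' enables stride_optimization
    'cmd_params': cmd_params,
}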
Code example #5
def driver(onnx_modelproto_bytes, precision: str, output_model_name: str,
           output_dir: str):
    try:
        model_proto = onnx.load_from_string(bytes(onnx_modelproto_bytes))
    except Exception as e:
        print("[python] onnx exception: ", str(e))
        # Re-raise: model_proto is unusable beyond this point, and continuing
        # would fail with a NameError on the next statement.
        raise

    model_graph = model_proto.graph  # pylint: disable=no-member
    log.debug("Number of nodes in graph_def: {}".format(len(model_graph.node)))
    log.debug(
        "Number of all input ports (not true inputs) in graph_def: {}".format(
            len(model_graph.input)))
    log.debug("Number of initializers in graph_def: {}".format(
        len(model_graph.initializer)))
    log.debug("Number of real inputs in graph_def: {}".format(
        len(model_graph.input) - len(model_graph.initializer)))
    update_extractors_with_extensions(onnx_op_extractors)

    try:
        graph = protobuf2nx(model_proto)
        log.debug("Number of nodes in NX graph: {}".format(
            graph.number_of_nodes()))
        graph.__setattr__(
            'name',
            output_model_name if output_model_name else model_proto.graph.name)  # pylint: disable=no-member
        graph.graph['layout'] = 'NCHW'
        graph.graph['fw'] = 'onnx'
        graph.graph[
            'feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3
        graph.graph['cmd_params'] = argparse.Namespace(
            batch=None,
            data_type='float',
            disable_fusing=False,
            disable_gfusing=False,
            disable_resnet_optimization=False,
            enable_concat_optimization=False,
            extensions=mo_extensions,
            finegrain_fusing=None,
            framework='onnx',
            freeze_placeholder_with_value=None,
            generate_deprecated_IR_V2=False,
            input=None,
            input_model=None,
            input_shape=None,
            keep_shape_ops=False,
            log_level='ERROR',
            mean_scale_values={},
            mean_values=(),
            model_name=None,
            move_to_preprocess=False,
            output=None,
            output_dir='.',
            placeholder_shapes=None,
            reverse_input_channels=False,
            scale=None,
            scale_values=(),
            silent=False,
            version=False,
            blobs_as_inputs=False,
            keep_quantize_ops_in_IR=False,
            generate_experimental_IR_V10=False)
        graph.graph['ir_version'] = 6

    except Exception as e:
        # This bytes-based entry point has no model file name, so the error
        # only reports the exception details.
        raise Error(
            'Cannot pre-process ONNX graph after reading it from the serialized model. ' \
            'Model is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44),
            str(e)
        ) from e
    graph.check_empty_graph(
        'protobuf2nx. It may happen due to problems with loaded model')
    extract_node_attrs(
        graph, lambda node: onnx_op_extractor(
            node, check_for_duplicates(onnx_op_extractors)))

    # --------------------------------- LOAD END ------------------------------------------------------
    class_registration.apply_replacements(
        graph, class_registration.ClassType.FRONT_REPLACER)
    class_registration.apply_replacements(
        graph, class_registration.ClassType.MIDDLE_REPLACER)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    for_graph_and_each_sub_graph_recursively(
        graph, convert_matmul_to_fully_connected)

    # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
    mark_unfused_nodes(graph, False)

    # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence
    # IE doesn't support BN with 4 inputs, so we have to split it to two ScaleShift
    convert_batch_norm(graph)
    graph_clean_up_onnx(graph)

    # Converting ScaleShift layer to Mul->Add
    convert_scale_shift_to_mul_add(graph)
    graph_clean_up_onnx(graph)

    # Fusing the sequences of Mul/Add operations
    fuse_mul_add_sequence(graph)
    graph_clean_up_onnx(graph)

    # Fusing linear operation to Convolution
    fuse_linear_ops(graph)
    graph_clean_up_onnx(graph)

    grouped_convolutions_fusing(graph)
    graph_clean_up_onnx(graph)

    fuse_linear_ops(graph)
    graph_clean_up_onnx(graph)

    MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph)
    FakeQuantizeFuse().find_and_replace_pattern(graph)

    AddFakeQuantizeFuse().find_and_replace_pattern(graph)
    MulFakeQuantizeFuse().find_and_replace_pattern(graph)

    convert_muladd_to_scaleshift(graph)
    graph_clean_up_onnx(graph)

    graph_clean_up_onnx(graph)
    convert_add_or_mul_to_scaleshift(graph)  # scale = 1
    graph_clean_up_onnx(graph)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    FuseReshapesSequence().find_and_replace_pattern(graph)
    RemoveRedundantReshapes().find_and_replace_pattern(graph)

    graph_clean_up_onnx(graph)

    pattern = EltwiseInputNormalize()
    pattern.find_and_replace_pattern(graph)

    merge_nodes_permutations(graph)
    permute_data_nodes_attrs(graph)
    permute_op_nodes_attrs(graph)

    graph_clean_up_onnx(graph)
    class_registration.apply_replacements(
        graph, class_registration.ClassType.BACK_REPLACER)

    for_graph_and_each_sub_graph_recursively(graph, remove_const_ops)

    CreateConstNodesReplacement().find_and_replace_pattern(graph)

    for_graph_and_each_sub_graph_recursively(graph, remove_output_ops)

    weights, xml_string = prepare_emit_ir(graph=graph,
                                          data_type=precision,
                                          output_dir=output_dir,
                                          output_model_name=output_model_name,
                                          meta_info={'unset': []})

    return weights, xml_string
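
A hypothetical usage sketch for this bytes-based entry point: it assumes driver is importable from the module above, that 'model.onnx' exists on disk, and that the returned weights object is bytes-like (all of these are assumptions, not guarantees of the real API).

# Illustrative only: convert a serialized ONNX model entirely in memory.
with open('model.onnx', 'rb') as f:
    onnx_modelproto_bytes = f.read()

weights, xml_string = driver(onnx_modelproto_bytes,
                             precision='FP32',
                             output_model_name='model',
                             output_dir='.')

# Persist the generated IR (assumes xml_string is str and weights is bytes-like).
with open('model.xml', 'w') as f:
    f.write(xml_string)
with open('model.bin', 'wb') as f:
    f.write(weights)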
Code example #6
File: onnx.py  Project: ChaYe001/LLvisionCompile
def driver(argv: argparse.Namespace, model_file_name: str,
           output_model_name: str, output_dir: str):
    log_step(argv.steps, 'LOAD')
    meta_info = get_meta_info(argv)

    model_proto = load_onnx_model(model_file_name)
    model_graph = model_proto.graph  # pylint: disable=no-member
    # print(model_graph)
    # assert len(model_graph) == 1, "An ONNX model contains more than 1 graph: unsupported"
    log.debug("Number of nodes in graph_def: {}".format(len(model_graph.node)))
    log.debug(
        "Number of all input ports (not true inputs) in graph_def: {}".format(
            len(model_graph.input)))
    log.debug("Number of initializers in graph_def: {}".format(
        len(model_graph.initializer)))
    log.debug("Number of real inputs in graph_def: {}".format(
        len(model_graph.input) - len(model_graph.initializer)))
    update_extractors_with_extensions(onnx_op_extractors)

    try:
        graph = protobuf2nx(model_proto)
        log.debug("Number of nodes in NX graph: {}".format(
            graph.number_of_nodes()))
        graph.__setattr__(
            'name',
            output_model_name if output_model_name else model_proto.graph.name)  # pylint: disable=no-member
        graph.graph['layout'] = 'NCHW'
        graph.graph['cmd_params'] = argv
        graph.graph['fw'] = 'onnx'
        graph.graph[
            'feature_dim'] = 1 if graph.graph['layout'] == 'NCHW' else 3

        if graph.graph['cmd_params'].generate_experimental_IR_V10:
            version = 10
        else:
            version = 6
        graph.graph[
            'ir_version'] = 2 if argv.generate_deprecated_IR_V2 else version

    except Exception as e:
        raise Error(
            'Cannot pre-process ONNX graph after reading from model file "{}". ' \
            'File is corrupt or has unsupported format. Details: {}. ' +
            refer_to_faq_msg(44),
            model_file_name,
            str(e)
        ) from e
    graph.check_empty_graph(
        'protobuf2nx. It may happen due to problems with loaded model')
    extract_node_attrs(
        graph, lambda node: onnx_op_extractor(
            node, check_for_duplicates(onnx_op_extractors)))

    # --------------------------------- LOAD END ------------------------------------------------------
    log_step(argv.steps, 'FRONT')
    class_registration.apply_replacements(
        graph, class_registration.ClassType.FRONT_REPLACER)
    log_step(argv.steps, 'MIDDLE')
    class_registration.apply_replacements(
        graph, class_registration.ClassType.MIDDLE_REPLACER)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    for_graph_and_each_sub_graph_recursively(
        graph, convert_matmul_to_fully_connected)

    # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
    mark_unfused_nodes(graph, argv.finegrain_fusing)

    # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence
    # IE doesn't support BN with 4 inputs, so we have to split it to two ScaleShift
    convert_batch_norm(graph)
    graph_clean_up_onnx(graph)

    if not argv.disable_fusing:
        # Converting ScaleShift layer to Mul->Add
        convert_scale_shift_to_mul_add(graph)
        graph_clean_up_onnx(graph)

        # Fusing the sequences of Mul/Add operations
        fuse_mul_add_sequence(graph)
        graph_clean_up_onnx(graph)

        # Fusing linear operation to Convolution
        fuse_linear_ops(graph)
        graph_clean_up_onnx(graph)

    if not argv.disable_gfusing:
        grouped_convolutions_fusing(graph)
        graph_clean_up_onnx(graph)
        if not argv.disable_fusing:
            fuse_linear_ops(graph)
            graph_clean_up_onnx(graph)

    MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph)
    FakeQuantizeFuse().find_and_replace_pattern(graph)

    AddFakeQuantizeFuse().find_and_replace_pattern(graph)
    MulFakeQuantizeFuse().find_and_replace_pattern(graph)

    convert_muladd_to_scaleshift(graph)
    graph_clean_up_onnx(graph)

    graph_clean_up_onnx(graph)
    convert_add_or_mul_to_scaleshift(graph)  # scale = 1
    graph_clean_up_onnx(graph)

    fuse_pad(graph)
    graph_clean_up_onnx(graph)

    if argv.reverse_input_channels:
        reverse_input_channels(graph)

    if argv.move_to_preprocess:
        move_scaleshift_to_preprocess(graph)
        graph_clean_up_onnx(graph)

    FuseReshapesSequence().find_and_replace_pattern(graph)
    RemoveRedundantReshapes().find_and_replace_pattern(graph)

    graph_clean_up_onnx(graph)

    pattern = EltwiseInputNormalize()
    pattern.find_and_replace_pattern(graph)

    merge_nodes_permutations(graph)
    permute_data_nodes_attrs(graph)
    permute_op_nodes_attrs(graph)

    graph_clean_up_onnx(graph)

    log_step(argv.steps, 'BACK')
    class_registration.apply_replacements(
        graph, class_registration.ClassType.BACK_REPLACER)

    for_graph_and_each_sub_graph_recursively(graph, remove_const_ops)

    CreateConstNodesReplacement().find_and_replace_pattern(graph)

    for_graph_and_each_sub_graph_recursively(graph, remove_output_ops)

    log_step(argv.steps, 'EMIT')
    prepare_emit_ir(graph=graph,
                    data_type=argv.data_type,
                    output_dir=output_dir,
                    output_model_name=output_model_name,
                    meta_info=meta_info)

    return 0