Example No. 1
    def test_div_test_2(self):
        # Test with two identical inputs taken from the same placeholder
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'Div'),
                             ('placeholder_1', 'Div'),
                             ('Div', 'last')
                             ],
                            {'placeholder_1': {'shape': np.array([1, 227, 227, 3])},
                             }, nodes_with_edges_only=True)

        graph_ref = build_graph(nodes_attributes,
                                [('power_1', 'mul_1'),
                                 ('placeholder_1', 'mul_1'),
                                 ('placeholder_1', 'power_1'),
                                 ('mul_1', 'last'),
                                 ],
                                {'placeholder_1': {'shape': np.array([1, 227, 227, 3])},
                                 'power_1': {'scale': np.array(1), 'power': np.array(-1), 'shift': np.array(0),
                                             'type': 'Power'},
                                 'mul_1': {'type': 'Eltwise', 'op': 'Mul'},
                                 }, nodes_with_edges_only=True)

        graph.stage = 'front'

        tested_class = Div()
        tested_class.find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True)
        self.assertTrue(flag, resp)
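
The reference graph above rewrites Div as a Power node with power = -1 feeding a Mul, i.e. a / b becomes a * b^-1. A minimal NumPy sketch of the identity the transformation relies on, with shapes matching the test (illustrative only, not part of the original test file):

import numpy as np

a = np.random.rand(1, 227, 227, 3).astype(np.float32)
b = np.random.rand(1, 227, 227, 3).astype(np.float32) + 0.5  # keep the divisor away from zero

# Div(a, b) and Mul(a, Power(b, -1)) agree for floating-point inputs
np.testing.assert_allclose(a / b, a * np.power(b, -1.0), rtol=1e-6)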
Example No. 2
    def test_div_with_integer(self):
        # Test where the transformation should not be applied because the divisor is an integer
        graph = build_graph(
            {
                **regular_op_with_shaped_data('parameter', [1, 227, 227, 3], {
                                                  'type': 'Parameter',
                                                  'data_type': np.int32
                                              }),
                **valued_const_with_data('const',
                                         np.array([-1.], dtype=np.int32)),
                **regular_op_with_shaped_data('div', None, {
                    'op': 'Div',
                    'type': 'Divide',
                    'name': 'my_div'
                }),
                **result()
            }, [
                *connect('parameter:0', '0:div'),
                *connect_data('const:0', '1:div'),
                *connect('div', 'output'),
            ])
        graph_ref = graph.copy()
        Div().find_and_replace_pattern(graph)

        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
        self.assertTrue(flag, resp)
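
The transformation is skipped here because the reciprocal trick from Example No. 1 (Power with exponent -1) does not preserve Div semantics for integer tensors. A short NumPy sketch of the difference (illustrative only, not taken from the test file):

import numpy as np

a = np.array([6], dtype=np.int32)
b = np.array([-1], dtype=np.int32)

print(a / b)  # true division of integer arrays yields floats: [-6.]

# NumPy rejects negative integer powers of integer arrays, so Mul(a, Power(b, -1))
# is not a valid replacement when the divisor is an integer tensor
try:
    np.power(b, -1)
except ValueError as err:
    print(err)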
Example No. 3
    def test_div_test_2(self):
        # Test with two identical inputs taken from the same placeholder
        graph = build_graph(nodes, [
            *connect('placeholder_1:0', '0:div'),
            *connect_data('placeholder_1:0', '1:div'),
            *connect('div', 'output'),
        ], nodes_with_edges_only=True)
        Div().find_and_replace_pattern(graph)

        graph_ref = build_graph(nodes, [
            *connect('placeholder_1:0', '0:mul'),
            *connect_data('placeholder_1:0', '0:reciprocal'),
            *connect('minus_one', '1:reciprocal'),
            *connect('reciprocal', '1:mul'),
            *connect('mul', 'output'),
        ], nodes_with_edges_only=True)

        (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
        self.assertTrue(flag, resp)
        self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Multiply')[0]]['name'] == 'my_div')
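
The nodes fixture referenced by this test is defined elsewhere in the test file and is not shown in the snippet. A plausible sketch of it, built from the same unit-test helpers used in Example No. 2 (the import path and the exact attribute values are assumptions, not copied from the original source):

import numpy as np
from unit_tests.utils.graph import regular_op_with_shaped_data, valued_const_with_data, result

nodes = {
    # original graph: a Parameter feeding both inputs of Div
    **regular_op_with_shaped_data('placeholder_1', [1, 227, 227, 3], {'type': 'Parameter'}),
    **regular_op_with_shaped_data('div', None, {'op': 'Div', 'type': 'Divide', 'name': 'my_div'}),

    # reference graph: Div rewritten as Pow(divisor, -1) followed by Mul
    **valued_const_with_data('minus_one', np.array(-1.)),
    **regular_op_with_shaped_data('reciprocal', [1, 227, 227, 3], {'type': 'Power'}),
    **regular_op_with_shaped_data('mul', None, {'type': 'Multiply'}),

    **result(),
}

The find_and_replace_pattern method below shows where Div() (together with Sub()) is invoked as part of the Model Optimizer fusing pipeline.
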
    def find_and_replace_pattern(self, graph: Graph):
        fw = graph.graph['fw']
        argv = graph.graph['cmd_params']
        layout = graph.graph['layout']

        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        # Mark nodes with the attribute 'can_be_fused': False to disable fusing for the specified nodes
        for_graph_and_each_sub_graph_recursively(graph, lambda graph: mark_unfused_nodes(graph, argv.finegrain_fusing))

        # Converting the FusedBatchNorm layer to a Mul->Add->Mul->Add sequence:
        # IE doesn't support batchNormInference with 4 inputs, so we have to split it into two ScaleShift operations
        for_graph_and_each_sub_graph_recursively(graph, convert_batch_norm)

        if fw == 'caffe':
            # Converting ScaleShift layer to Mul->Add
            for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)

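        # Decompose Div into Mul with a reciprocal (Pow(-1)) input and Sub into Add with a negated input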
        for_graph_and_each_sub_graph_recursively(graph, Div().find_and_replace_pattern)
        for_graph_and_each_sub_graph_recursively(graph, Sub().find_and_replace_pattern)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        if not argv.disable_fusing:
            if fw != 'caffe':
                # Converting ScaleShift layer to Mul->Add
                for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)
                for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

            # Fusing the sequences of Mul/Add operations
            for_graph_and_each_sub_graph_recursively(graph, fuse_mul_add_sequence)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

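            # Normalize inputs of element-wise operations so the fusing passes below see a consistent form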
            normalize_eltwise_inputs(graph)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

            # Fusing linear operations into Convolution
            for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

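        # Fuse groups of parallel convolutions (Split -> Convolutions -> Concat) into a single grouped Convolution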
        if not argv.disable_gfusing:
            for_graph_and_each_sub_graph_recursively(graph, grouped_convolutions_fusing)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())
            if not argv.disable_fusing:
                for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
                for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        for_graph_and_each_sub_graph_recursively(graph, normalize_eltwise_inputs)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

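        # Fuse the marked operations into FakeQuantize (e.g. pulling Add/Mul into its quantization ranges)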
        MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph)
        FakeQuantizeFuse().find_and_replace_pattern(graph)
        AddFakeQuantizeFuse().find_and_replace_pattern(graph)
        MulFakeQuantizeFuse().find_and_replace_pattern(graph)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

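        # Fuse Pad operations into the following Convolution/Pooling once more, after the fusings above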
        for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

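        # ResNet-specific stride optimization for non-NHWC (e.g. NCHW) models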
        if layout != 'NHWC' and not argv.disable_resnet_optimization:
            stride_optimization(graph)