def test_sub_test_2(self):
    # Test with two same inputs from one placeholder
    graph = build_graph(nodes, [
        *connect('placeholder_1:0', '0:sub'),
        *connect_data('placeholder_1:0', '1:sub'),
        *connect('sub', 'output'),
    ], nodes_with_edges_only=True)
    Sub().find_and_replace_pattern(graph)

    graph_ref = build_graph(nodes, [
        *connect('placeholder_1:0', '0:add'),
        *connect_data('placeholder_1:0', '0:negate'),
        *connect('minus_one', '1:negate'),
        *connect('negate', '1:add'),
        *connect('add', 'output'),
    ], nodes_with_edges_only=True)

    (flag, resp) = compare_graphs(graph, graph_ref, 'output', check_op_attrs=True)
    self.assertTrue(flag, resp)
    self.assertTrue(graph.node[graph.get_nodes_with_attributes(type='Add')[0]]['name'] == 'my_sub')
def test_sub_test_2(self):
    # test with two same inputs from one placeholder
    graph = build_graph(nodes_attributes,
                        [('placeholder_1', 'Sub'),
                         ('placeholder_1', 'Sub'),
                         ('Sub', 'last')],
                        nodes_with_edges_only=True)
    graph_ref = build_graph(nodes_attributes,
                            [('neg', 'add_1'),
                             ('placeholder_1', 'add_1'),
                             ('placeholder_1', 'neg'),
                             ('const', 'neg'),
                             ('add_1', 'last')],
                            nodes_with_edges_only=True)
    graph.stage = 'front'

    tested_class = Sub()
    tested_class.find_and_replace_pattern(graph)

    (flag, resp) = compare_graphs(graph, graph_ref, 'last', check_op_attrs=True)
    self.assertTrue(flag, resp)
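# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the sources above): both tests check that
# the Sub front transformation rewrites `a - b` into `a + (b * -1)`, which is
# why the reference graphs contain a negation and an Add instead of a Sub.
# The tiny numpy check below only demonstrates the arithmetic identity the
# rewrite relies on; the function and variable names are hypothetical and
# unrelated to the graph-building API used in the tests.
# ---------------------------------------------------------------------------
import numpy as np


def sub_as_add_of_negated(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Compute a - b the way the rewritten graph does: a + (b * -1)."""
    minus_one = np.float32(-1.0)
    return a + (b * minus_one)


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    a = rng.standard_normal((2, 3)).astype(np.float32)
    b = rng.standard_normal((2, 3)).astype(np.float32)
    # The rewritten form must match direct subtraction elementwise.
    assert np.allclose(a - b, sub_as_add_of_negated(a, b))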
def find_and_replace_pattern(self, graph: Graph):
    fw = graph.graph['fw']
    argv = graph.graph['cmd_params']
    layout = graph.graph['layout']

    for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
    for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    # Mark nodes with attr 'can_be_fused': False to disable fusing for specified nodes
    for_graph_and_each_sub_graph_recursively(graph, lambda graph: mark_unfused_nodes(graph, argv.finegrain_fusing))

    # Converting FusedBatchNorm layer to Mul->Add->Mul->Add sequence
    # IE doesn't support batchNormInference with 4 inputs, so we have to split it to two ScaleShift
    for_graph_and_each_sub_graph_recursively(graph, convert_batch_norm)

    if fw == 'caffe':
        # Converting ScaleShift layer to Mul->Add
        for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)

    for_graph_and_each_sub_graph_recursively(graph, Div().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, Sub().find_and_replace_pattern)
    for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    if not argv.disable_fusing:
        if fw != 'caffe':
            # Converting ScaleShift layer to Mul->Add
            for_graph_and_each_sub_graph_recursively(graph, convert_scale_shift_to_mul_add)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        # Fusing the sequences of Mul/Add operations
        for_graph_and_each_sub_graph_recursively(graph, fuse_mul_add_sequence)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        normalize_eltwise_inputs(graph)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

        # Fusing linear operation to Convolution
        for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    if not argv.disable_gfusing:
        for_graph_and_each_sub_graph_recursively(graph, grouped_convolutions_fusing)
        for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())
        if not argv.disable_fusing:
            for_graph_and_each_sub_graph_recursively(graph, fuse_linear_ops)
            for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    for_graph_and_each_sub_graph_recursively(graph, normalize_eltwise_inputs)
    for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    MarkNodesToFuseUpToFakeQuantize().find_and_replace_pattern(graph)
    FakeQuantizeFuse().find_and_replace_pattern(graph)
    AddFakeQuantizeFuse().find_and_replace_pattern(graph)
    MulFakeQuantizeFuse().find_and_replace_pattern(graph)
    for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    for_graph_and_each_sub_graph_recursively(graph, fuse_pad)
    for_graph_and_each_sub_graph_recursively(graph, lambda G: G.clean_up())

    if layout != 'NHWC' and not argv.disable_resnet_optimization:
        stride_optimization(graph)
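# ---------------------------------------------------------------------------
# Illustrative sketch (an assumption, not the actual implementation of
# for_graph_and_each_sub_graph_recursively): the helper used throughout the
# pass above applies a function to the top-level graph and then to every
# nested sub-graph (e.g. loop/condition bodies). The toy version below models
# a graph as a plain object holding a list of sub-graphs; all names here are
# hypothetical and only show the recursive pass-application pattern.
# ---------------------------------------------------------------------------
from typing import Callable, List, Optional


class ToyGraph:
    def __init__(self, name: str, sub_graphs: Optional[List['ToyGraph']] = None):
        self.name = name
        self.sub_graphs = sub_graphs or []


def apply_recursively(graph: ToyGraph, func: Callable[[ToyGraph], None]) -> None:
    """Apply `func` to `graph` and, depth-first, to every nested sub-graph."""
    func(graph)
    for sub in graph.sub_graphs:
        apply_recursively(sub, func)


if __name__ == '__main__':
    body = ToyGraph('loop_body')
    main = ToyGraph('main', sub_graphs=[body])
    visited = []
    apply_recursively(main, lambda g: visited.append(g.name))
    # The pass touches the main graph first, then its nested body.
    assert visited == ['main', 'loop_body']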