Example #1
    def test_pooling_infer_no_shape(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'pool'), ('pool', 'node_2'),
                               ('node_2', 'op_output')],
            {
                'node_2': {
                    'shape': None
                },
                'node_1': {
                    'shape': None
                },
                'pool': {
                    'window': np.array([1, 1, 1, 1]),
                    'stride': np.array([1, 1, 2, 2]),
                    'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
                    'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
                    'pool_method': 'avg',
                    'exclude_pad': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0]),
                    'pooling_convention': 'full'
                }
            })

        pool_node = Node(graph, 'pool')
        Pooling.infer(pool_node)
        res_shape = graph.node['node_2']['shape']
        self.assertIsNone(res_shape)
Example #2
    def extract(cls, node):
        # Extract pads attribute
        pads = np.array([node.module.padding, node.module.padding],
                        dtype=np.int64).reshape(1, 2)
        pads = np.repeat(pads, 2, axis=0)
        final_pads = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)

        # Extract strides attribute
        strides = [node.module.stride, node.module.stride]
        final_strides = np.array([1, 1, *strides], dtype=np.int64)

        kernel_shape = [node.module.kernel_size, node.module.kernel_size]
        final_kernel_shape = np.array([1, 1, *kernel_shape], dtype=np.int64)

        attrs = {
            'op': node.op,
            'window': final_kernel_shape,
            'stride': final_strides,
            'pad': final_pads,
            'pool_method': 'max',
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW',
        }

        # update the attributes of the node
        Pooling.update_node_stat(node, attrs)
        return cls.enabled
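
To make the pad construction above concrete, here is a minimal trace of the same NumPy calls with a hypothetical padding of 1 (node.module.padding is assumed to be a single int, as in torch.nn.MaxPool2d):

import numpy as np

padding = 1  # hypothetical value of node.module.padding
pads = np.array([padding, padding], dtype=np.int64).reshape(1, 2)  # [[1, 1]]
pads = np.repeat(pads, 2, axis=0)                                  # [[1, 1], [1, 1]]
final_pads = np.array([[0, 0], [0, 0], *pads], dtype=np.int64)
# -> [[0, 0], [0, 0], [1, 1], [1, 1]]: N and C are never padded
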
Example #3
def _insert_pooling(graph: Graph, first_node: Node, second_node: Node,
                    spatial_dims):
    """
    This function inserts a point-wise pooling layer between two nodes.
    """
    log.debug("STRIDE PROP: Insert pooling between {} and {}".format(
        first_node.name, second_node.name))
    stride_prop = second_node.stride_prop
    assert len(graph.get_edge_data(first_node.id, second_node.id)) == 1
    eattrs = graph.get_edge_data(first_node.id, second_node.id)[0]
    graph.remove_edge(first_node.id, second_node.id)

    pooling = Pooling(
        graph,
        dict(name='Pooling_',
             spatial_dims=spatial_dims,
             window=np.array([1, 1, 1, 1]),
             output_spatial_shape=None,
             stride=np.array(stride_prop),
             pad_spatial_shape=np.array([[0, 0], [0, 0]]),
             pad=np.array([[0, 0], [0, 0], [0, 0], [0, 0]]),
             pool_method='max',
             is_partial_inferred=False))
    pooling_data = pooling.create_node_with_data([first_node])

    _clean_fw_tensor_attrs(pooling_data)

    graph.add_edges_from([(pooling_data.id, second_node.id, eattrs)])
Example #4
    def extract(cls, node):
        pb = node.parameters
        collect_until_token(pb, b'<PoolSize>')
        kernel = read_binary_integer32_token(pb)
        tag = find_next_tag(pb)
        if tag == '<PoolStep>':
            read_placeholder(pb, 1)
            stride = read_binary_integer32_token(pb)
            pool_step = stride
            pool_stride = read_token_value(pb, b'<PoolStride>')
        elif tag == '<PoolStride>':
            stride = 1
            pool_step = None
            read_placeholder(pb, 1)
            pool_stride = read_binary_integer32_token(pb)
        else:
            raise Error('Cannot extract pooling parameters for {}'.format(node))

        mapping_rule = {
            'window': np.array([1, 1, 1, kernel], dtype=np.int64),
            'stride': np.array([1, 1, stride, stride], dtype=np.int64),
            'pool_stride': pool_stride,
            'pool_step': pool_step,
            'pad': np.array([[0, 0], [0, 0], [0, 0], [0, 0]], dtype=np.int64),
            'pad_spatial_shape': np.array([[0, 0], [0, 0]], dtype=np.int64),
            'pool_method': 'max',
        }
        mapping_rule.update(layout_attrs())
        Pooling.update_node_stat(node, mapping_rule)
        return cls.enabled
Example #5
    def test_pooling_dynamic_infer(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'pool'),
                             ('pool', 'node_2'),
                             ('node_2', 'op_output')
                             ],
                            {'node_2': {'shape': None},
                             'node_1': {'shape': shape_array([1, dynamic_dimension_value, dynamic_dimension_value,
                                                              256])},
                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 2, 2]),
                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
                                      'pad_spatial_shape': np.array([[3, 3], [3, 3]]),
                                      'pool_method': 'avg', 'exclude_pad': False, 'global_pool': False,
                                      'output_spatial_shape': None, 'output_shape': None,
                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
                                      'pooling_convention': 'full'}
                             })

        pool_node = Node(graph, 'pool')

        Pooling.infer(pool_node)
        exp_shape = shape_array([1, dynamic_dimension_value, dynamic_dimension_value, 131])
        res_shape = graph.node['node_2']['shape']
        self.assertTrue(strict_compare_tensors(exp_shape, res_shape))
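
The expected 131 is consistent with the ceil-based output formula for the 'full' pooling convention; a minimal sketch of that arithmetic, assuming infer computes ceil((input + pads - kernel) / stride) + 1 per static spatial dimension and propagates dynamic dimensions unchanged:

import math

in_size, pads, kernel, stride = 256, (3, 3), 3, 2
out = math.ceil((in_size + sum(pads) - kernel) / stride) + 1
assert out == 131  # the two dynamic spatial dims simply stay dynamic
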
Example #6
    def infer(cls, node: Node):
        input_shape = node.in_node(0).shape
        input_h = input_shape[2]
        input_w = input_shape[3]
        output_h = node.output_size[0]
        output_w = node.output_size[1]

        stride_h = input_h // output_h
        stride_w = input_w // output_w
        kernel_h = input_h - (output_h - 1) * stride_h
        kernel_w = input_w - (output_w - 1) * stride_w

        data = {
            'window': int64_array([1, 1, kernel_h, kernel_w]),
            'stride': int64_array([1, 1, stride_h, stride_w]),
            'pad': int64_array([[0, 0], [0, 0], [0, 0], [0, 0]]),
            'pad_spatial_shape': int64_array([[0, 0], [0, 0]]),
            'pool_method': 'avg',
            'exclude_pad': 'false',
            'output_spatial_shape': None,
            'spatial_dims': None,
            'channel_dims': int64_array([1]),
            'batch_dims': int64_array([0]),
            'layout': 'NCHW',
            'rounding_type': 'floor',
            'pooling_convention': 'valid'
        }

        # update the attributes of the node
        Pooling.update_node_stat(node, data)
        Pooling.infer(node)
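
To illustrate the decomposition above: for a hypothetical 7x7 input and a requested 2x2 output, the derived stride and kernel make the pooling windows tile the entire input:

input_h, output_h = 7, 2
stride_h = input_h // output_h                    # 3
kernel_h = input_h - (output_h - 1) * stride_h    # 4
# windows cover rows [0:4] and [3:7], so the whole input is consumed
assert (output_h - 1) * stride_h + kernel_h == input_h
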
Example #7
    def test_pooling_infer_with_dilations(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'pool'),
                             ('pool', 'node_2'),
                             ('node_2', 'op_output')
                             ],
                            {'node_2': {'shape': None},
                             'node_1': {'shape': np.array([1, 3, 256, 256])},
                             'pool': {'window': np.array([1, 1, 2, 2]), 'stride': np.array([1, 1, 2, 2]),
                                      'pad': np.array([[0, 0], [0, 0], [0, 0], [1, 1]]),
                                      'pad_spatial_shape': np.array([[0, 0], [1, 1]]),
                                      'pool_method': 'max', 'exclude_pad': False, 'global_pool': False,
                                      'output_spatial_shape': None, 'output_shape': None,
                                      'kernel_spatial': np.array([2, 2]), 'spatial_dims': np.array([2, 3]),
                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
                                      'pooling_convention': 'full', 'dilation': np.array([1, 1, 2, 2]),
                                      'auto_pad': 'valid'}
                             })

        pool_node = Node(graph, 'pool')

        Pooling.infer(pool_node)
        exp_shape = np.array([1, 3, 127, 127])
        res_shape = graph.node['node_2']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
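
The expected 127 is consistent with dilation enlarging the effective kernel and auto_pad == 'valid' ignoring the explicit pads; a sketch of the arithmetic under those assumptions:

kernel, dilation, stride, in_size = 2, 2, 2, 256
eff_kernel = (kernel - 1) * dilation + 1      # 3
out = (in_size - eff_kernel) // stride + 1    # floor rounding for 'valid'
assert out == 127
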
Example #8
    def test_pooling_infer_decrement_input_spatial(self):
        graph = build_graph(nodes_attributes,
                            [('node_1', 'pool'),
                             ('pool', 'node_2'),
                             ('node_2', 'op_output')
                             ],
                            {'node_2': {'shape': None},
                             'node_1': {'shape': np.array([1, 3, 224, 224])},
                             'pool': {'window': np.array([1, 1, 1, 1]), 'stride': np.array([1, 1, 3, 3]),
                                      'pad': np.array([[0, 0], [0, 0], [3, 3], [3, 3]]),
                                      'pad_spatial_shape': np.array([[1, 1], [1, 1]]),
                                      'pool_method': 'avg', 'exclude_pad': 'false', 'global_pool': 0,
                                      'output_spatial_shape': None, 'output_shape': None,
                                      'kernel_spatial': np.array([3, 3]), 'spatial_dims': np.array([2, 3]),
                                      'channel_dims': np.array([1]), 'batch_dims': np.array([0]),
                                      'pooling_convention': 'full'}
                             })

        pool_node = Node(graph, 'pool')

        Pooling.infer(pool_node)
        exp_shape = np.array([1, 3, 75, 75])
        res_shape = graph.node['node_2']['shape']
        for i in range(0, len(exp_shape)):
            self.assertEqual(exp_shape[i], res_shape[i])
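
The expected 75 appears to come from the ceil formula plus the usual ceil-mode correction, which drops the last window when it would start entirely inside the right padding; a sketch under that assumption:

import math

in_size, pad_l, pad_r, kernel, stride = 224, 1, 1, 3, 3
out = math.ceil((in_size + pad_l + pad_r - kernel) / stride) + 1   # 76
if (out - 1) * stride >= in_size + pad_l:
    out -= 1   # last window starts past the padded input: decrement
assert out == 75
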
Example #9
    def extract(cls, node):
        # Extract pads attribute
        final_pads = get_pads(node.module)

        # Extract strides attribute
        strides = [node.module.stride, node.module.stride]
        final_strides = np.array([1, 1, *strides], dtype=np.int64)

        kernel_shape = [node.module.kernel_size, node.module.kernel_size]
        final_kernel_shape = np.array([1, 1, *kernel_shape], dtype=np.int64)

        attrs = {
            'op': node.op,
            'window': final_kernel_shape,
            'stride': final_strides,
            'pad': final_pads,
            'pool_method': 'avg',
            'exclude_pad': 'false',
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW',
        }

        # update the attributes of the node
        Pooling.update_node_stat(node, attrs)
        return cls.enabled
Example #10
    def test_pooling_infer_wrong_input_shape(self):
        graph = build_graph(
            nodes_attributes, [('node_1', 'pool'), ('pool', 'node_2'),
                               ('node_2', 'op_output')],
            {
                'node_2': {
                    'shape': None
                },
                'node_1': {
                    'shape': np.array([1, 3, 1, 1])
                },
                'pool': {
                    'window': np.array([1, 1, 5, 5]),
                    'stride': np.array([1, 1, 2, 2]),
                    'pad': np.array([[0, 0], [0, 0], [1, 1], [1, 1]]),
                    'pad_spatial_shape': np.array([[1, 1], [1, 1]]),
                    'pool_method': 'avg',
                    'exclude_pad': False,
                    'global_pool': False,
                    'output_spatial_shape': None,
                    'output_shape': None,
                    'kernel_spatial': np.array([3, 3]),
                    'spatial_dims': np.array([2, 3]),
                    'channel_dims': np.array([1]),
                    'batch_dims': np.array([0]),
                    'pooling_convention': 'full'
                }
            })

        pool_node = Node(graph, 'pool')

        with self.assertRaises(Error):
            Pooling.infer(pool_node)
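
The Error is expected because the padded input is smaller than the kernel, so no pooling window fits; a quick check, assuming infer takes the spatial part of 'window' (5x5) as the kernel:

in_size, pads, kernel = 1, (1, 1), 5
assert in_size + sum(pads) - kernel < 0   # 1 + 2 - 5 == -2: no valid window
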
Example #11
    def extract(cls, node):
        attrs = common_onnx_pool_extractor(node)
        attrs.update({'pooling_convention': 'full',
                      'global_pool': True,
                     })

        Pooling.update_node_stat(node, attrs)
        return cls.enabled
Example #12
    def extract(node):
        attrs = get_mxnet_layer_attrs(node.symbol_dict)

        kernel = attrs.tuple("kernel", int, None)
        stride = attrs.tuple("stride", int,
                             tuple(np.ones(len(kernel), dtype=np.int64)))
        padding = attrs.tuple("pad", int,
                              tuple(np.zeros(len(kernel), dtype=np.int64)))
        method = attrs.str("pool_type", None)
        rt = 'floor'

        data = {
            'window': np.array([1, 1, *kernel], dtype=np.int64),
            'stride': np.array([1, 1, *stride], dtype=np.int64),
            'pad': np.array([[0, 0], [0, 0], *[[pad, pad] for pad in padding]],
                            dtype=np.int64),
            'pad_spatial_shape': np.array([[pad, pad] for pad in padding],
                                          dtype=np.int64),
            'pool_method': method,
            'exclude_pad': 'false',
            'output_spatial_shape': None,
            'spatial_dims': None,
            'channel_dims': np.array([1], dtype=np.int64),
            'batch_dims': np.array([0], dtype=np.int64),
            'layout': 'NCHW',
            'rounding_type': rt,
        }

        pooling_conv = attrs.str("pooling_convention", 'valid')
        if pooling_conv:
            data["pooling_convention"] = pooling_conv
            if pooling_conv == 'full':
                data["rounding_type"] = 'ceil'

        global_pool = attrs.bool("global_pool", False)
        if global_pool:
            data["global_pool"] = global_pool

        # update the attributes of the node
        Pooling.update_node_stat(node, data)
        return __class__.enabled
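
For reference, a minimal trace of the attribute layout the extractor above would build for a hypothetical kernel=(3, 3), stride=(2, 2), pad=(1, 1):

import numpy as np

kernel, stride, padding = (3, 3), (2, 2), (1, 1)
window = np.array([1, 1, *kernel], dtype=np.int64)    # [1, 1, 3, 3]
strides = np.array([1, 1, *stride], dtype=np.int64)   # [1, 1, 2, 2]
pad = np.array([[0, 0], [0, 0], *[[p, p] for p in padding]], dtype=np.int64)
# -> [[0, 0], [0, 0], [1, 1], [1, 1]]: batch and channel dims get no padding
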
Example #13
    def find_and_replace_pattern(self, graph: Graph):
        for pool_v2_node in graph.get_op_nodes(op='PoolingV2'):
            pool_v2_name = pool_v2_node.soft_get('name', pool_v2_node.id)

            pool_v1_node = Pooling(graph, {'window': pool_v2_node.in_port(1).data.get_value(),
                                           'stride': pool_v2_node.in_port(2).data.get_value(),

                                           'pad': pool_v2_node.pad,
                                           'spatial_dims': pool_v2_node.spatial_dims,
                                           'auto_pad': pool_v2_node.auto_pad,
                                           'output_spatial_shape': pool_v2_node.output_spatial_shape,
                                           'pad_spatial_shape': pool_v2_node.pad_spatial_shape,

                                           'pool_method': pool_v2_node.pool_method,
                                           'permute_attrs': pool_v2_node.permute_attrs,}).create_node()

            rename_nodes([(pool_v2_node, pool_v2_name + '/to_be_removed'), (pool_v1_node, pool_v2_name)])

            pool_v2_node.in_port(0).get_connection().set_destination(pool_v1_node.in_port(0))
            pool_v2_node.out_port(0).get_connection().set_source(pool_v1_node.out_port(0))
Example #14
    def replace_op(self, graph: Graph, node: Node) -> list:
        input_node = node.in_node(0)

        input_reshape_node = Reshape(graph, {
            'name': 'Reshape/' + node.name,
            'infer': Reshape.kaldi_infer
        }).create_node([input_node])

        pooling_node = Pooling(graph, graph.node[node.id]).create_node(
            [input_reshape_node])

        output_reshape_node = Reshape(graph, {
            'name': node.name + '/Reshape/',
            'infer': Reshape.kaldi_infer
        }).create_node([pooling_node])

        return [output_reshape_node.id]
Example #15
    def extract(cls, node):
        proto_layer = node.pb
        param = proto_layer.pooling_param

        method = 'max'
        exclude_pad = True
        kernel = [0, 0]
        stride = [1, 1]
        padding = [0, 0]
        global_pooling = False

        if hasattr(param, 'global_pooling') and param.global_pooling:
            global_pooling = param.global_pooling
        else:
            kernel = get_spatial_attr(kernel, 'kernel_size', 'kernel', param)
            padding = get_spatial_attr(padding, 'pad', 'pad', param)
            stride = get_spatial_attr(stride, 'stride', 'stride', param)

        if param.pool == 0:
            method = 'max'
            exclude_pad = True
        elif param.pool == 1:
            method = 'avg'
            exclude_pad = False
        else:
            raise ValueError('Unknown Pooling Method!')

        pooling_convention = 'full'  # Caffe rounds output shapes up (ceil) by default
        rt = 'ceil'

        if hasattr(param, 'ceil_mode') and not param.ceil_mode:
            # if ceil_mode is present and set to False, use floor rounding for shapes in partial_infer
            pooling_convention = 'valid'
            rt = 'floor'

        attrs = {
            'window': np.array([1, 1, kernel[1], kernel[0]], dtype=np.int64),
            'stride': np.array([1, 1, stride[1], stride[0]], dtype=np.int64),
            'pad': np.array([[0, 0], [0, 0], [padding[1], padding[1]],
                             [padding[0], padding[0]]], dtype=np.int64),
            'pad_spatial_shape': np.array([[padding[1], padding[1]],
                                           [padding[0], padding[0]]],
                                          dtype=np.int64),
            'pool_method': method,
            'exclude_pad': exclude_pad,
            'global_pool': global_pooling,
            'output_spatial_shape': None,
            'rounding_type': rt
        }

        attrs.update(layout_attrs())
        attrs['pooling_convention'] = pooling_convention

        # update the attributes of the node
        Pooling.update_node_stat(node, attrs)
        return cls.enabled
Example #16
    def extract(cls, node):
        attrs = common_onnx_pool_extractor(node)

        Pooling.update_node_stat(node, attrs)
        return cls.enabled
Example #17
    def extract(cls, node):
        attrs = create_pooling_attrs(node, 'avg')
        attrs.update({'op': __class__.op})
        # update the attributes of the node
        Pooling.update_node_stat(node, attrs)
        return cls.enabled
Example #18
    def replace_pattern(self, graph: nx.MultiDiGraph, match: dict):
        node = match['reduce']
        if not node.has_valid('reduce_type') or node.reduce_type.lower() not in self.supported_reduce_types:
            log.error("Reduce type {} is not supported for node {}".format(node.soft_get('reduce_type'), node.id))
            return

        reduce_type = node.reduce_type.lower()
        if reduce_type not in self.pool_method_map:
            log.error("Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                      "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_node().shape
        output_shape = node.out_node().shape

        # normalize node.axis to exclude negative indices
        node.axis = [get_canonical_axis_index(input_shape, a) for a in node.axis]

        axis = node.axis

        # Check that values in axis list are consecutive
        for idx in range(1, len(axis)):
            if axis[idx] != (axis[idx - 1] + 1):
                log.error("Reduce with not consecutive axes {} is not supported ".format(axis))
                return

        layout = graph.graph['layout']

        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axis])
        begin_dims = np.array([input_shape[idx] for idx in range(axis[0])])
        end_dim = np.prod([input_shape[idx] for idx in range(axis[-1] + 1, len(input_shape))])

        # 2. Create reshape with appropriate shape
        if layout == 'NCHW':
            if len(begin_dims) > 2:
                begin_dims = np.array([np.prod(begin_dims[0:-1]), begin_dims[-1]], dtype=np.int64)
            else:
                # Expand begin_dims to 2
                begin_dims = np.array(np.append(begin_dims, [1] * (2 - len(begin_dims))), dtype=np.int64)
            reshape_shape = np.array([*begin_dims, reduction_dim, end_dim], dtype=np.int64)
            pool_window = np.array([1, 1, reduction_dim, 1], dtype=np.int64)
        elif layout == 'NHWC':
            begin_dims = np.prod(begin_dims)
            reshape_shape = np.array([begin_dims, reduction_dim, 1, end_dim], dtype=np.int64)
            pool_window = np.array([1, reduction_dim, 1, 1], dtype=np.int64)
        else:
            log.error('{} layout is currently not supported'.format(layout))
            return

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {'name': node.id + '/Reshape', 'dim': reshape_shape})
        final_reshape_op = Reshape(graph, {'name': node.id + '/FinalReshape', 'dim': output_shape})
        pooling_op = Pooling(graph,
                             dict(name=node.id + '/Pool',
                                  window=pool_window,
                                  output_spatial_shape=None,
                                  batch_dims=np.array([get_batch_dim(layout, 4)], dtype=np.int64),
                                  channel_dims=np.array([get_features_dim(layout, 4)], dtype=np.int64),
                                  exclude_pad='false', pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        final_reshape_op.create_node_with_data(
            inputs=[pooling_op.create_node_with_data(
                inputs=[reshape_op.create_node_with_data(
                    inputs=[input_data]
                )]
            )],
            data_nodes=output_data)

        # 4. For a sum reduction, multiply by the size of the reduction slice with a Mul op
        if reduce_type == 'sum':
            output_data.in_node().insert_node_with_data_after(
                output_data,
                Power,
                {'name': node.name + '/Mul', 'scale': float(reduction_dim)}
            )
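
A worked trace of the shape arithmetic above, assuming a hypothetical NCHW mean-reduction over H and W of a [1, 64, 7, 7] tensor:

import numpy as np

input_shape, axis = [1, 64, 7, 7], [2, 3]
reduction_dim = np.prod([input_shape[i] for i in axis])              # 49
begin_dims = np.array([input_shape[i] for i in range(axis[0])])      # [1, 64]
end_dim = np.prod([input_shape[i]
                   for i in range(axis[-1] + 1, len(input_shape))])  # 1 (empty product)
reshape_shape = np.array([*begin_dims, reduction_dim, end_dim])      # [1, 64, 49, 1]
pool_window = np.array([1, 1, reduction_dim, 1])                     # [1, 1, 49, 1]
# Reshape -> pooling with this window -> Reshape back to the reduced shape
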
Example #19
    def extract(node):
        attrs = common_onnx_pool_extractor(node)

        Pooling.update_node_stat(node, attrs)
        return __class__.enabled
Example #20
    def replace_pattern(self, graph: Graph, match: dict):
        node = match['reduce']

        if node.out_port(0).data.get_value() is not None:
            # We leave Reduce* operations located in constant sub-graph as is
            # to keep model reshapable with --keep_shape_ops cli key
            return

        reduce_type = node.type
        if reduce_type not in self.pool_method_map:
            log.error(
                "Reduce type {} is not included in pool_method_map. Please update pool_method_map with new key "
                "{}".format(reduce_type, reduce_type))
            return

        input_data = node.in_node()
        output_data = node.out_node()

        input_shape = node.in_port(0).data.get_shape()
        output_shape = node.out_port(0).data.get_shape()

        # normalize node axes to exclude negative indices
        axes_data_value = node.in_port(1).data.get_value()
        axes = int64_array([
            axes_data_value.item()
        ]) if axes_data_value.size == 1 else axes_data_value
        axes = [get_canonical_axis_index(input_shape, a) for a in axes]
        axes = sorted(axes)

        # Check that values in axes list are consecutive
        for idx in range(1, len(axes)):
            if axes[idx] != (axes[idx - 1] + 1):
                log.error(
                    "Reduce with not consecutive axes {} is not supported ".
                    format(axes))
                return
        # So now we are sure that we can convert Reduce to appropriate operation

        # 1. Calculate shape that will be used in reduction
        reduction_dim = np.prod([input_shape[idx] for idx in axes])
        begin_dims = np.array([input_shape[idx] for idx in range(axes[0])])
        end_dim = np.prod([
            input_shape[idx] for idx in range(axes[-1] + 1, len(input_shape))
        ])

        # 2. Create reshape with appropriate shape
        if len(begin_dims) > 2:
            if 0 not in axes:
                begin_dims = int64_array(
                    [begin_dims[0], np.prod(begin_dims[1:])])
            else:
                begin_dims = int64_array(
                    [np.prod(begin_dims[0:-1]), begin_dims[-1]])
        else:
            # Expand begin_dims to 2
            begin_dims = int64_array(
                np.append(begin_dims, [1] * (2 - len(begin_dims))))

        reshape_shape = int64_array([*begin_dims, reduction_dim, end_dim])
        pool_window = int64_array([1, 1, reduction_dim, 1])

        if end_dim == 1:
            new_window = ReduceReplacer.initial_reshape_dim_normalizer(
                reduction_dim)
            reshape_shape = int64_array([*begin_dims, *new_window])
            pool_window = int64_array([1, 1, *new_window])

        # 3. Reduce => Reshape->Pooling->Reshape
        reshape_op = Reshape(graph, {'name': node.id + '/Reshape'})
        reshape_dim_const_data = Const(graph, {
            'name': node.id + '/Reshape/Dim',
            'value': reshape_shape
        }).create_node_with_data()

        final_reshape_op = Reshape(graph, {'name': node.id + '/FinalReshape'})
        final_reshape_dim_const_data = Const(graph, {
            'name': node.id + '/FinalReshape/Dim',
            'value': output_shape
        }).create_node_with_data()
        pooling_op = Pooling(
            graph,
            dict(name=node.id + '/Pool',
                 window=pool_window,
                 output_spatial_shape=None,
                 batch_dims=int64_array([0]),
                 channel_dims=int64_array([1]),
                 exclude_pad='false',
                 pool_method=self.pool_method_map[reduce_type]))

        graph.remove_edge(input_data.id, node.id)
        graph.remove_edge(node.id, output_data.id)

        if np.array_equal(input_shape, reshape_shape):
            input_to_pooling = input_data
        else:
            input_to_pooling = reshape_op.create_node_with_data(
                inputs=[input_data, reshape_dim_const_data])
        pooling = pooling_op.create_node_with_data(inputs=[input_to_pooling])
        final_reshape_op.create_node_with_data(
            inputs=[pooling, final_reshape_dim_const_data],
            data_nodes=output_data)

        # set the batch dimension to 0 in the Reshape constants so the IR remains reshape-able over the batch dimension
        if 0 not in axes:
            reshape_dim_const_data.in_node(0).value[0] = 0
            final_reshape_dim_const_data.in_node(0).value[0] = 0

        # 4. For a sum reduction, multiply by the size of the reduction slice with a Mul op
        if reduce_type == 'ReduceSum':
            output_data.in_node().insert_node_with_data_after(
                output_data, AttributedPower, {
                    'name': node.name + '/Mul',
                    'scale': float(reduction_dim)
                })
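
The final scaling step makes sense if ReduceSum is lowered to an average pool: a sum equals the mean times the slice size. A one-line sanity check of why 'scale' is set to float(reduction_dim):

import numpy as np

x = np.arange(6, dtype=np.float32)
assert x.sum() == x.mean() * x.size   # sum == mean * reduction_dim
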