Example no. 1
    def forward(self, x):

        x = self.conv_1(x)
        x = self.BN_1(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 1st max-pool layer

        x = self.conv_2(x)
        x = self.BN_2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 2nd max-pool layer

        x = self.conv_3(x)
        x = self.BN_3(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 3rd max-pool layer

        x = self.conv_4(x)
        x = self.BN_4(x)
        x = F.relu(x)
        x = F.max_pool2d(x, kernel_size=2)  # 4th max-pool layer

        x = paddle.flatten(x, 1, -1)  # flatten
        x = self.linear(x)  # linear

        output = x

        return output
Example no. 2
    def build_model(self, exec_mode):
        feed_shape = list(self.feed_shape[0])
        if self.is_ipu_mode(exec_mode):
            feed_shape[0] = 1
        x = paddle.static.data(
            name=self.feed_list[0], shape=feed_shape, dtype='float32')
        with paddle.static.ipu_shard_guard(index=0, stage=0):
            # using fp32
            x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
            x = paddle.static.nn.batch_norm(x, act='relu')
            x = F.max_pool2d(x, kernel_size=2, stride=2)

        with paddle.static.ipu_shard_guard(index=1, stage=1):
            # using fp16
            with paddle.static.amp.fp16_guard():
                x = paddle.static.nn.conv2d(
                    input=x, num_filters=6, filter_size=3)
                x = paddle.static.nn.batch_norm(x, act='relu')
                x = F.max_pool2d(x, kernel_size=2, stride=2)

        with paddle.static.ipu_shard_guard(index=2, stage=2):
            # using fp32
            x = paddle.static.nn.fc(x, size=10)
            loss = paddle.mean(x)
        self.fetch_list = [loss.name]
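Example no. 2 pins each pipeline stage to an IPU shard and wraps the middle stage in paddle.static.amp.fp16_guard(). A minimal sketch of the guard on its own, assuming a plain static-graph program and that an AMP pass later rewrites the guarded region to fp16 (e.g. with use_fp16_guard=True):

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()
    main_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog):
        x = paddle.static.data(name='x', shape=[1, 3, 32, 32], dtype='float32')
        with paddle.static.amp.fp16_guard():
            # ops built here are tagged so that AMP's fp16 rewriting
            # (run with use_fp16_guard=True) casts only this region
            y = F.max_pool2d(x, kernel_size=2, stride=2)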
Example no. 3
    def forward(self, x):
        logit_list = []

        x = self.enco1(x)
        x, ind1 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
        size1 = x.shape

        x = self.enco2(x)
        x, ind2 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
        size2 = x.shape

        x = self.enco3(x)
        x, ind3 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
        size3 = x.shape

        x = self.enco4(x)
        x, ind4 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
        size4 = x.shape

        x = self.enco5(x)
        x, ind5 = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
        size5 = x.shape

        x = F.max_unpool2d(x,
                           indices=ind5,
                           kernel_size=2,
                           stride=2,
                           output_size=size4)
        x = self.deco1(x)

        x = F.max_unpool2d(x,
                           indices=ind4,
                           kernel_size=2,
                           stride=2,
                           output_size=size3)
        x = self.deco2(x)

        x = F.max_unpool2d(x,
                           indices=ind3,
                           kernel_size=2,
                           stride=2,
                           output_size=size2)
        x = self.deco3(x)

        x = F.max_unpool2d(x,
                           indices=ind2,
                           kernel_size=2,
                           stride=2,
                           output_size=size1)
        x = self.deco4(x)

        x = F.max_unpool2d(x, indices=ind1, kernel_size=2, stride=2)
        x = self.deco5(x)

        logit_list.append(x)

        return logit_list
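Example no. 3 pairs every F.max_pool2d(..., return_mask=True) in the encoder with a later F.max_unpool2d in the decoder, which scatters values back to the argmax positions recorded in the mask. A minimal round-trip sketch with illustrative shapes:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 3, 8, 8])
    pooled, mask = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
    # unpooling writes each pooled value back to its recorded position
    restored = F.max_unpool2d(pooled, mask, kernel_size=2, stride=2)
    print(pooled.shape, restored.shape)  # [1, 3, 4, 4] [1, 3, 8, 8]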
Example no. 4
    def forward(self, x, pool_size=(2, 2), pool_type='avg'):
        x = self.conv1(x)
        x = self.bn1(x)
        x = F.relu(x)

        if pool_type == 'max':
            x = F.max_pool2d(x, kernel_size=pool_size)
        elif pool_type == 'avg':
            x = F.avg_pool2d(x, kernel_size=pool_size)
        elif pool_type == 'avg+max':
            x = F.avg_pool2d(x, kernel_size=pool_size) + F.max_pool2d(x, kernel_size=pool_size)
        else:
            raise Exception(
                f'Pooling type of {pool_type} is not supported. It must be one of "max", "avg" and "avg+max".')
        return x
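The 'avg+max' branch sums the two pooled maps elementwise, so the output shape matches either pooling alone. A standalone sketch of the fusion:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 16, 32, 32])
    fused = F.avg_pool2d(x, kernel_size=2) + F.max_pool2d(x, kernel_size=2)
    print(fused.shape)  # [1, 16, 16, 16]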
Example no. 5
    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                avg_pool = F.avg_pool2d(x, (x.shape[2], x.shape[3]),
                                        stride=(x.shape[2], x.shape[3]))
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == 'max':
                max_pool = F.max_pool2d(x, (x.shape[2], x.shape[3]),
                                        stride=(x.shape[2], x.shape[3]))
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == 'lp':
                # note: this branch appears to be ported from a torch-style
                # implementation; an lp_pool2d may not be available in
                # paddle.nn.functional
                lp_pool = F.lp_pool2d(x,
                                      2, (x.shape[2], x.shape[3]),
                                      stride=(x.shape[2], x.shape[3]))
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == 'lse':
                # LSE pool only
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)

            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum = channel_att_sum + channel_att_raw

        scale = F.sigmoid(channel_att_sum)
        scale = paddle.unsqueeze(scale, 2)
        scale = paddle.unsqueeze(scale, 3)
        scale = paddle.expand_as(scale, x)
        return x * scale
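The 'max' branch above gets global max pooling by setting the kernel to the full spatial size, collapsing each channel to one value before the shared MLP. In isolation:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 64, 14, 14])
    # kernel == spatial size, so each channel reduces to a single value
    gmp = F.max_pool2d(x, kernel_size=(x.shape[2], x.shape[3]))
    print(gmp.shape)  # [2, 64, 1, 1]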
Example no. 6
    def forward(self, feat, img_metas=None):
        if img_metas is not None:
            assert len(img_metas[0]) == feat.shape[0]

        valid_ratios = None
        if img_metas is not None and self.mask:
            valid_ratios = img_metas[-1]

        h_feat = feat.shape[2]  # bsz c h w
        feat_v = F.max_pool2d(feat,
                              kernel_size=(h_feat, 1),
                              stride=1,
                              padding=0)
        feat_v = feat_v.squeeze(2)  # bsz * C * W
        feat_v = paddle.transpose(feat_v, perm=[0, 2, 1])  # bsz * W * C
        holistic_feat = self.rnn_encoder(feat_v)[0]  # bsz * T * C

        if valid_ratios is not None:
            valid_hf = []
            T = holistic_feat.shape[1]
            for i, valid_ratio in enumerate(valid_ratios):
                valid_step = min(T, math.ceil(T * valid_ratio)) - 1
                valid_hf.append(holistic_feat[i, valid_step, :])
            valid_hf = paddle.stack(valid_hf, axis=0)
        else:
            valid_hf = holistic_feat[:, -1, :]  # bsz * C
        holistic_feat = self.linear(valid_hf)  # bsz * C

        return holistic_feat
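Example no. 6 collapses the feature height with a (h, 1) kernel, turning the 2-D map into a width-long sequence of column features for the RNN encoder. The shape effect in isolation:

    import paddle
    import paddle.nn.functional as F

    feat = paddle.rand([2, 128, 6, 40])  # bsz, C, H, W
    col = F.max_pool2d(feat, kernel_size=(feat.shape[2], 1), stride=1)
    print(col.shape)  # [2, 128, 1, 40]: height collapsed, width preserved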
Example no. 7
    def test_case(self):
        import numpy as np
        import paddle
        import paddle.nn.functional as F
        import paddle.fluid.core as core
        import paddle.fluid as fluid
        paddle.enable_static()

        input_data = np.array([[[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12],
                                 [13, 14, 15, 16]]]]).astype("float32")

        x = fluid.data(name="x", shape=[1, 1, 4, 4], dtype="float32")
        output, indices = F.max_pool2d(
            x, kernel_size=2, stride=2, return_mask=True)
        unpool_out = F.max_unpool2d(
            output, indices, kernel_size=2, stride=None, output_size=(5, 5))
        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        results = exe.run(fluid.default_main_program(),
                          feed={"x": input_data},
                          fetch_list=[unpool_out],
                          return_numpy=True)

        pool_out_np = np.array([[[[6., 8.], [14., 16.]]]]).astype("float32")
        indices_np = np.array([[[[5, 7], [13, 15]]]]).astype("int32")
        expect_res = unpool2dmax_forward_naive(pool_out_np, indices_np, [2, 2],
                                               [2, 2], [0, 0],
                                               [5, 5]).astype("float64")
        self.assertTrue(np.allclose(results[0], expect_res))
Example no. 8
 def forward(self, x):
     bottom_up_features = self.bottom_up(x)
     bottom_up_features = bottom_up_features[self.output_b - 2:]
     bottom_up_features = bottom_up_features[::-1]
     results = []
     prev_features = self.lateral_convs[0](bottom_up_features[0])
     results.append(self.output_convs[0](prev_features))
     for l_id, (features, lateral_conv, output_conv) in enumerate(
             zip(bottom_up_features[1:], self.lateral_convs[1:],
                 self.output_convs[1:])):
         top_down_features = F.interpolate(prev_features,
                                           scale_factor=2,
                                           mode="bilinear",
                                           align_corners=False)
         lateral_features = lateral_conv(features)
         prev_features = lateral_features + top_down_features
         results.append(output_conv(prev_features))
     if (self.output_e == 6):
         p6 = F.max_pool2d(results[0], kernel_size=1, stride=2, padding=0)
         results.insert(0, p6)
     elif (self.output_e == 7):
         p6 = self.p6(results[0])
         results.insert(0, p6)
         p7 = self.p7(F.relu(results[0]))
         results.insert(0, p7)
     return results
Example no. 9
    def test_case(self):
        import paddle
        import paddle.nn.functional as F
        import paddle.fluid.core as core
        import paddle.fluid as fluid
        import numpy as np

        if core.is_compiled_with_cuda():
            place = core.CUDAPlace(0)
        else:
            place = core.CPUPlace()
        with fluid.dygraph.guard(place):
            input_data = np.array([[[[1, 2, 3, 4], [5, 6, 7,
                                                    8], [9, 10, 11, 12],
                                     [13, 14, 15, 16]]]]).astype("float32")
            input_x = paddle.to_tensor(input_data)
            output, indices = F.max_pool2d(input_x,
                                           kernel_size=2,
                                           stride=2,
                                           return_mask=True)
            out_pp = F.max_unpool2d(output,
                                    indices,
                                    kernel_size=2,
                                    stride=None,
                                    output_size=(5, 5))
            output_np = output.numpy()
            indices_np = indices.numpy()
            expect_res = unpool2dmax_forward_naive(output_np, indices_np,
                                                   [2, 2], [2, 2], [0, 0],
                                                   [5, 5]).astype("float64")
            self.assertTrue(np.allclose(out_pp.numpy(), expect_res))
Example no. 10
    def forward(self, x):
        # Encoder
        input_shape = paddle.shape(x)[2:]

        x = self.conv_bn0(x)  # 1/2
        shortcut = self.conv_bn1(x)  # shortcut
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)  # 1/4
        x = self.block1(x)  # 1/8
        x = self.block2(x)  # 1/16

        # Decoder
        x = self.depthwise_separable0(x)
        shortcut_shape = paddle.shape(shortcut)[2:]
        x = F.interpolate(
            x,
            shortcut_shape,
            mode='bilinear',
            align_corners=self.align_corners)
        x = paddle.concat(x=[shortcut, x], axis=1)
        x = self.depthwise_separable1(x)

        logit = self.depthwise_separable2(x)
        logit = F.interpolate(
            logit,
            input_shape,
            mode='bilinear',
            align_corners=self.align_corners)

        return [logit]
Example no. 11
 def _simple_nms(self, heat, kernel=3):
     """
     Use maxpool to filter the max score, get local peaks.
     """
     pad = (kernel - 1) // 2
     hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
     keep = paddle.cast(hmax == heat, 'float32')
     return heat * keep
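The trick here is that a stride-1 max pool leaves a pixel unchanged only where it is already the local maximum, so comparing the pooled map with the original isolates peaks. A small demo:

    import paddle
    import paddle.nn.functional as F

    heat = paddle.rand([1, 1, 8, 8])             # stand-in for a predicted heatmap
    hmax = F.max_pool2d(heat, 3, stride=1, padding=1)
    keep = paddle.cast(hmax == heat, 'float32')  # 1.0 only at 3x3-local maxima
    peaks = heat * keep                          # all other scores suppressed to 0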
Example no. 12
 def run1():
     with fluid.dygraph.guard():
         input_np = np.random.uniform(-1, 1,
                                      [2, 3, 32, 32]).astype(np.float32)
         input_pd = fluid.dygraph.to_variable(input_np)
         # invalid: pads the batch axis, so this call is expected to raise an error
         padding = [[0, 1], [0, 0], [0, 0], [0, 0]]
         res_pd = max_pool2d(
             input_pd, kernel_size=2, stride=2, padding=padding)
Example no. 13
    def build_model(self):
        x = paddle.static.data(
            name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32')

        # using fp32
        x = paddle.static.nn.conv2d(input=x, num_filters=3, filter_size=3)
        x = paddle.static.nn.batch_norm(x, act='relu')
        x = F.max_pool2d(x, kernel_size=2, stride=2)

        # using fp16
        with paddle.static.amp.fp16_guard():
            x = paddle.static.nn.conv2d(input=x, num_filters=6, filter_size=3)
            x = paddle.static.nn.batch_norm(x, act='relu')
            x = F.max_pool2d(x, kernel_size=2, stride=2)

        # using fp32
        x = paddle.static.nn.fc(x, size=10)
        loss = paddle.mean(x)
        self.fetch_list = [loss.name]
Example no. 14
 def forward(self, inputs):
     x = inputs['image']
     conv1 = self.conv1(x)
     x = F.max_pool2d(conv1, kernel_size=3, stride=2, padding=1)
     outs = []
     for idx, stage in enumerate(self.res_layers):
         x = stage(x)
         if idx in self.return_idx:
             outs.append(x)
     return outs
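The stem pooling used here and in several later examples (kernel_size=3, stride=2, padding=1) halves both spatial dims while the 3x3 window keeps border coverage. Assuming a 112x112 input map:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 64, 112, 112])
    y = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
    print(y.shape)  # [1, 64, 56, 56]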
Example no. 15
    def _forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return outputs
Example no. 16
 def run3():
     with fluid.dygraph.guard():
         input_np = np.random.uniform(-1, 1,
                                      [2, 3, 32, 32]).astype(np.float32)
         input_pd = fluid.dygraph.to_variable(input_np)
         padding = "padding"
         res_pd = max_pool2d(input_pd,
                             kernel_size=2,
                             stride=2,
                             padding=padding,
                             data_format='NHWC')
Example no. 17
 def forward(self, inputs):
     x = inputs['image']
     res1 = self.res1(x)
     x = F.max_pool2d(res1, kernel_size=3, stride=2, padding=1)
     outs = []
     for idx, stage in enumerate(self.res_layers):
         x = stage(x)
         if idx == self.freeze_at:
             x.stop_gradient = True
         if idx in self.return_idx:
             outs.append(x)
     return outs
Example no. 18
    def _forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
        outputs = [branch3x3, branch7x7x3, branch_pool]
        return outputs
Example no. 19
 def run9():
     with fluid.dygraph.guard():
         input_np = np.random.uniform(-1, 1,
                                      [2, 3, 32, 32]).astype(np.float32)
         input_pd = fluid.dygraph.to_variable(input_np)
         # return_indices together with NHWC is expected to raise an error
         res_pd = max_pool2d(input_pd,
                             kernel_size=2,
                             stride=2,
                             padding=0,
                             ceil_mode=False,
                             data_format='NHWC',
                             return_indices=True)
Example no. 20
    def forward(self, X):
        h = F.relu(self.conv1_1(X))
        h = F.relu(self.conv1_2(h))
        relu1_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv2_1(h))
        h = F.relu(self.conv2_2(h))
        relu2_2 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        relu3_3 = h
        h = F.max_pool2d(h, kernel_size=2, stride=2)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        relu4_3 = h

        return [relu1_2, relu2_2, relu3_3, relu4_3]
Example no. 21
 def forward(self, inputs):
     y = paddle.reshape(
         inputs, [-1, inputs.shape[2], inputs.shape[3], inputs.shape[4]])
     y = self.conv(y)
     y = F.max_pool2d(y, kernel_size=3, stride=2, padding=1)
     for bottleneck_block in self.bottleneck_block_list:
         y = bottleneck_block(y)
     y = F.adaptive_avg_pool2d(y, output_size=1)
     y = F.dropout(y, p=0.5)
     y = paddle.reshape(y, [-1, self.seg_num, y.shape[1]])
     y = paddle.mean(y, axis=1)
     y = paddle.reshape(y, shape=[-1, 2048])
     y = self.out(y)
     return y
Example no. 22
 def forward(self, inputs):
     x = inputs
     conv1 = self.conv1(x)
     x = F.max_pool2d(conv1, kernel_size=3, stride=2, padding=1)
     outs = []
     for idx, stage in enumerate(self.res_layers):
         x = stage(x)
         if idx == self.freeze_at:
             x.stop_gradient = True
         if idx in self.return_idx:
             outs.append(x)
     x = self.output_flatten(outs[-1])
     x = self.output_fc(x)
     x = self.bn_output(x)
     return x
Example no. 23
    def forward(self, feats):
        # feats: [P3 - P7]
        lateral_feats = []

        # up
        up_feature = feats[-1]
        for i, feature in enumerate(feats[::-1]):
            if i == 0:
                lateral_feats.append(feature)
            else:
                shape = paddle.shape(feature)
                up_feature = F.interpolate(up_feature,
                                           size=[shape[2], shape[3]])
                lateral_feature = self._feature_fusion_cell(
                    self.conv_up[i - 1],
                    feature,
                    up_feature,
                    weights=self.up_weights[i - 1]
                    if self.use_weighted_fusion else None)
                lateral_feats.append(lateral_feature)
                up_feature = lateral_feature

        out_feats = []
        # down
        down_feature = lateral_feats[-1]
        for i, (lateral_feature,
                route_feature) in enumerate(zip(lateral_feats[::-1], feats)):
            if i == 0:
                out_feats.append(lateral_feature)
            else:
                down_feature = F.max_pool2d(down_feature, 3, 2, 1)
                if i == len(feats) - 1:
                    route_feature = None
                    weights = self.down_weights[
                        i - 1][:2] if self.use_weighted_fusion else None
                else:
                    weights = self.down_weights[
                        i - 1] if self.use_weighted_fusion else None
                out_feature = self._feature_fusion_cell(self.conv_down[i - 1],
                                                        lateral_feature,
                                                        down_feature,
                                                        route_feature,
                                                        weights=weights)
                out_feats.append(out_feature)
                down_feature = out_feature

        return out_feats
Example no. 24
    def forward(self, body_feats):
        laterals = []
        used_backbone_levels = len(self.spatial_scale)
        for i in range(used_backbone_levels):
            laterals.append(self.lateral_convs[i](body_feats[i]))

        used_backbone_levels = len(self.spatial_scale)
        for i in range(used_backbone_levels - 1):
            idx = used_backbone_levels - 1 - i
            upsample = F.interpolate(
                laterals[idx],
                scale_factor=2.,
                mode='nearest',
            )
            laterals[idx - 1] += upsample

        fpn_output = []
        for lvl in range(self.min_level, self.highest_backbone_level + 1):
            i = lvl - self.min_level
            fpn_output.append(self.fpn_convs[i](laterals[i]))

        spatial_scales = self.spatial_scale
        if self.num_outs > len(fpn_output):
            # use max pool to get more levels on top of outputs (Faster R-CNN, Mask R-CNN)
            if not self.has_extra_convs:
                fpn_output.append(F.max_pool2d(fpn_output[-1], 1, stride=2))
                spatial_scales = spatial_scales + [spatial_scales[-1] * 0.5]
            # add extra conv levels for RetinaNet(use_c5)/FCOS(use_p5)
            else:
                if self.use_c5:
                    extra_source = body_feats[-1]
                else:
                    extra_source = fpn_output[-1]
                fpn_output.append(
                    self.fpn_convs[used_backbone_levels](extra_source))
                spatial_scales = spatial_scales + [spatial_scales[-1] * 0.5]
                for i in range(used_backbone_levels + 1, self.num_outs):
                    if self.relu_before_extra_convs:
                        fpn_output.append(self.fpn_convs[i](F.relu(
                            fpn_output[-1])))
                    else:
                        fpn_output.append(self.fpn_convs[i](fpn_output[-1]))
                    spatial_scales = spatial_scales + [
                        spatial_scales[-1] * 0.5
                    ]
        return fpn_output, spatial_scales
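When no extra convs are configured, the code above grows one more pyramid level with F.max_pool2d(fpn_output[-1], 1, stride=2): a 1x1 window with stride 2 simply subsamples every other pixel, halving both spatial dims. For instance:

    import paddle
    import paddle.nn.functional as F

    p5 = paddle.rand([1, 256, 16, 16])
    p6 = F.max_pool2d(p5, 1, stride=2)  # 1x1 window, stride 2: plain subsampling
    print(p6.shape)  # [1, 256, 8, 8]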
Example no. 25
    def forward(self, x):
        if not self.training or self.keep_prob == 1:
            return x
        else:
            gamma = (1. - self.keep_prob) / (self.block_size**2)
            for s in x.shape[2:]:
                gamma *= s / (s - self.block_size + 1)

            matrix = paddle.cast(
                paddle.rand(x.shape, x.dtype) < gamma, x.dtype)
            mask_inv = F.max_pool2d(matrix,
                                    self.block_size,
                                    stride=1,
                                    padding=self.block_size // 2)
            mask = 1. - mask_inv
            y = x * mask * (mask.numel() / mask.sum())
            return y
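Here F.max_pool2d acts as a dilation: each Bernoulli seed in matrix is grown into a block_size x block_size square, which is what turns per-pixel dropout into DropBlock. A sketch of just the mask construction, with an assumed seed rate of 0.1:

    import paddle
    import paddle.nn.functional as F

    block_size = 3
    x = paddle.rand([1, 1, 8, 8])
    seeds = paddle.cast(paddle.rand(x.shape) < 0.1, x.dtype)  # sparse seed pixels
    # the stride-1 max pool dilates every seed into a block_size x block_size square
    mask_inv = F.max_pool2d(seeds, block_size, stride=1, padding=block_size // 2)
    mask = 1.0 - mask_inv  # 0 inside dropped blocks, 1 elsewhere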
Example no. 26
 def forward(self, x):
     outputs = []
     # stem
     x = self.conv1(x)
     x = self.bn1(x)
     x = F.relu_(x)
     x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
     # blocks
     x = self.layer1(x)
     outputs.append(x)
     x = self.layer2(x)
     outputs.append(x)
     x = self.layer3(x)
     outputs.append(x)
     x = self.layer4(x)
     outputs.append(x)
     return outputs
Example no. 27
def nms_hm(heat_map, kernel=3):
    """Do max_pooling for nms

    Args:
        heat_map (paddle.Tensor): pred cls heatmap
        kernel (int, optional): max_pool kernel size. Defaults to 3.

    Returns:
        heatmap after nms
    """
    pad = (kernel - 1) // 2

    hmax = F.max_pool2d(heat_map,
                        kernel_size=(kernel, kernel),
                        stride=1,
                        padding=pad)
    eq_index = (hmax == heat_map).astype("float32")

    return heat_map * eq_index
Example no. 28
    def check_max_dygraph_ceilmode_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(input_np)
            result = max_pool2d(
                input, kernel_size=2, stride=2, padding=0, ceil_mode=True)

            result_np = max_pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                ceil_mode=True)
            self.assertTrue(np.allclose(result.numpy(), result_np))

            max_pool2d_dg = paddle.nn.layer.MaxPool2D(
                kernel_size=2, stride=2, padding=0, ceil_mode=True)
            result = max_pool2d_dg(input)
            self.assertTrue(np.allclose(result.numpy(), result_np))
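ceil_mode=True rounds the output size up rather than down, so trailing pixels that a floor division would drop still get pooled. A quick shape check in dygraph mode:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([1, 1, 7, 7])
    print(F.max_pool2d(x, 2, stride=2, ceil_mode=False).shape)  # [1, 1, 3, 3]
    print(F.max_pool2d(x, 2, stride=2, ceil_mode=True).shape)   # [1, 1, 4, 4]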
Example no. 29
    def check_max_static_results(self, place):
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            input = fluid.data(
                name="input", shape=[2, 3, 32, 32], dtype="float32")
            result = max_pool2d(input, kernel_size=2, stride=2, padding=0)

            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            result_np = pool2D_forward_naive(
                input_np,
                ksize=[2, 2],
                strides=[2, 2],
                paddings=[0, 0],
                pool_type='max')

            exe = fluid.Executor(place)
            fetches = exe.run(fluid.default_main_program(),
                              feed={"input": input_np},
                              fetch_list=[result])
            self.assertTrue(np.allclose(fetches[0], result_np))
Example no. 30
    def check_max_dygraph_nhwc_results(self, place):
        with fluid.dygraph.guard(place):
            input_np = np.random.random([2, 3, 32, 32]).astype("float32")
            input = fluid.dygraph.to_variable(
                np.transpose(input_np, [0, 2, 3, 1]))
            result = max_pool2d(input,
                                kernel_size=2,
                                stride=2,
                                padding=0,
                                return_indices=False,
                                data_format="NHWC")

            result_np = pool2D_forward_naive(input_np,
                                             ksize=[2, 2],
                                             strides=[2, 2],
                                             paddings=[0, 0],
                                             pool_type='max')
            self.assertTrue(
                np.allclose(np.transpose(result.numpy(), [0, 3, 1, 2]),
                            result_np))
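With data_format='NHWC' the channel axis moves last and pooling runs over the middle two dims, which is why the test transposes before comparing against the NCHW reference. A minimal dygraph sketch:

    import paddle
    import paddle.nn.functional as F

    x = paddle.rand([2, 32, 32, 3])  # NHWC layout
    y = F.max_pool2d(x, kernel_size=2, stride=2, data_format='NHWC')
    print(y.shape)  # [2, 16, 16, 3]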