def test_pooling():
    """1-D max pooling over a huge (1, 2, INT_OVERFLOW) tensor with gradient.

    Plants a single large value, pools with kernel/stride 2, and checks the
    pooled shape, the pooled maximum, and the backward gradient shape/values.
    """
    # NOTE(review): a second ``def test_pooling`` appears later in this file
    # and shadows this one at import time — confirm both are meant to exist.
    data = np.ones((1, 2, INT_OVERFLOW))
    data[0][0][2] = 100
    data.attach_grad()
    with mx.autograd.record():
        pooled = npx.pooling(data=data, kernel=(2), stride=2, pool_type='max')
    assert pooled.shape == (1, 2, HALF_INT_OVERFLOW)
    assert pooled[0][0][1] == 100
    pooled.backward()
    assert data.grad.shape == (1, 2, INT_OVERFLOW)
    assert data.grad[0][0][0] == 1
def test_pooling():
    """3-D max pooling on a large 5-D tensor, plus an oversized-input check.

    First verifies that pooling a 1-D layout whose spatial axis has
    INT_OVERFLOW elements raises MXNetError, then runs a (2, 2, 2) max pool
    over a (1, 1, D, H, W) tensor and checks shapes, the pooled maximum,
    and the backward gradient.
    """
    def check_oversized_dim_rejected():
        # A spatial axis of INT_OVERFLOW elements must be refused by pooling.
        huge = np.ones((1, 1, INT_OVERFLOW))
        assertRaises(MXNetError, npx.pooling, data=huge, kernel=(2),
                     stride=(2), pool_type='max')

    check_oversized_dim_rejected()

    depth, height, width = 2**12, 2**10, 2**10
    data = np.ones((1, 1, depth, height, width))
    data[0, 0, 0, 0, 2] = 100
    data.attach_grad()
    with mx.autograd.record():
        pooled = npx.pooling(data=data, kernel=(2, 2, 2),
                             stride=(2, 2, 2), pool_type='max')
    # All three spatial axes are even, so each is exactly halved.
    assert pooled.shape == (1, 1, depth // 2, height // 2, width // 2)
    assert pooled[0, 0, 0, 0, 1] == 100
    pooled.backward()
    assert data.grad.shape == (1, 1, depth, height, width)
    assert data.grad[0, 0, 0, 0, 0] == 1
# Example no. 3
    def forward(self, input):
        """Multi-scale pooling forward pass.

        For each pyramid level, max-pools ``input`` with a window covering
        1/2**level of each spatial axis, bilinearly resizes the result back
        to the input resolution, applies that level's convolution, and
        finally concatenates the input with every level's output along the
        channel axis before the final conv/norm layer.
        """
        _, _, height, width = input.shape

        pyramid = [input]
        for level in range(self.depth):
            pool_h = height // (2**level)
            pool_w = width // (2**level)
            window = (pool_h, pool_w)
            pooled = FFx.pooling(input,
                                 kernel=window,
                                 stride=window,
                                 pool_type='max')
            # Resize the pooled map back to the full (height, width) so all
            # levels can be concatenated channel-wise.
            resized = mx.contrib.ndarray.BilinearResize2D(
                pooled.as_nd_ndarray(), height=height, width=width)
            level_out = self.convs[level](resized.as_np_ndarray())
            pyramid.append(level_out)

        merged = FF.concatenate(pyramid, axis=1)
        return self.conv_norm_final(merged)