Example #1
import numpy as np

import megengine.functional as F
from megengine import tensor
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple


def _save_to(self, name="grad"):
    # Test-suite helper: records the incoming gradient on the given tensor.
    def callback(grad):
        setattr(self, name, grad)

    return callback


def test_adaptive_avg_pool2d():
    inp = tensor(np.arange(0, 16, dtype=np.float32).reshape(1, 1, 4, 4))
    oshp = (2, 2)
    grad = Grad().wrt(inp, callback=_save_to(inp))
    outp = F.adaptive_avg_pool2d(
        inp,
        oshp,
    )
    assert make_shape_tuple(outp.shape) == (
        inp.shape[0],
        inp.shape[1],
        *oshp,
    )
    # each non-overlapping 2x2 window of the 4x4 input is averaged
    np.testing.assert_equal(
        outp.numpy(), np.array([[[[2.5, 4.5], [10.5, 12.5]]]],
                               dtype=np.float32))

    # backward: each output's unit gradient is shared by its 2x2 window
    grad(outp, tensor(F.ones_like(outp)))
    assert make_shape_tuple(inp.grad.shape) == make_shape_tuple(inp.shape)
    np.testing.assert_equal(
        inp.grad.numpy(),
        np.array(
            [[[
                [0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25],
                [0.25, 0.25, 0.25, 0.25],
            ]]],
            dtype=np.float32,
        ),
    )
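
The expected numbers are easy to reproduce by hand: pooling a 4x4 map to 2x2 averages each non-overlapping 2x2 window, and the backward pass spreads each output's unit gradient uniformly over its window, i.e. 1/4 per element. A minimal pure-NumPy cross-check (independent of MegEngine):

import numpy as np

x = np.arange(16, dtype=np.float32).reshape(4, 4)

# Forward: view as (2, 2, 2, 2) blocks and average within each 2x2 window.
fwd = x.reshape(2, 2, 2, 2).mean(axis=(1, 3))
print(fwd)  # [[ 2.5  4.5] [10.5 12.5]]

# Backward: a unit upstream gradient per output cell is shared by the
# 4 input cells of its window, so every input element receives 1/4.
grad = np.full((4, 4), 0.25, dtype=np.float32)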
Example #2
    def forward(self, inputs):
        out_l1 = self.level1(inputs)
        if not self.reinf:
            # without input reinforcement, later levels must not see the raw input
            inputs = None
        out_l2 = self.level2(out_l1, inputs)

        out_l3_0 = self.level3_0(out_l2, inputs)
        for i, layer in enumerate(self.level3):
            if i == 0:
                out_l3 = layer(out_l3_0)
            else:
                out_l3 = layer(out_l3)

        out_l4_0 = self.level4_0(out_l3, inputs)
        for i, layer in enumerate(self.level4):
            if i == 0:
                out_l4 = layer(out_l4_0)
            else:
                out_l4 = layer(out_l4)

        out_l5_0 = self.level5_0(out_l4, inputs)
        for i, layer in enumerate(self.level5):
            if i == 0:
                out_l5 = layer(out_l5_0)
            else:
                out_l5 = layer(out_l5)
        # classifier head: global average pooling -> flatten -> linear
        net = F.adaptive_avg_pool2d(out_l5, 1)
        net = F.flatten(net, 1)
        net = self.classifier(net)
        return net
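
The tail of this `forward` is the standard global-average-pooling classifier head: `adaptive_avg_pool2d(..., 1)` collapses each feature map to 1x1, which makes the linear classifier independent of the input resolution. A minimal standalone sketch of the same pattern (channel and class counts are assumptions, not taken from the model above):

import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M

classifier = M.Linear(256, 1000)  # assumed: 256 feature channels, 1000 classes

feat = mge.tensor(np.random.randn(2, 256, 13, 13).astype("float32"))
net = F.adaptive_avg_pool2d(feat, 1)  # (2, 256, 1, 1); any H x W works
net = F.flatten(net, 1)               # (2, 256)
logits = classifier(net)              # (2, 1000)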
Example #3
File: layers.py Project: Qsingle/Megvision
    def forward(self, x):
        net = self.conv(x)
        bs = x.shape[0]
        if self.use_bn:
            net = self.bn0(net)
        net = self.relu(net)
        if self.radix > 1:
            # sum the radix channel groups to form the attention branch
            splits = F.split(net, int(self.radix), axis=1)
            gap = sum(splits)
        else:
            gap = net
        gap = F.adaptive_avg_pool2d(gap, 1)  # global average pooling to 1x1
        gap = self.fc1(gap)
        if self.use_bn:
            gap = self.bn1(gap)
        gap = self.relu(gap)
        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).reshape(bs, -1, 1, 1)

        if self.radix > 1:
            attens = F.split(atten, int(self.radix), axis=1)
            out = sum([att * split for att, split in zip(attens, splits)])
        else:
            out = atten * net
        return out
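
The snippet relies on a `self.rsoftmax` module that is not shown. In split-attention blocks it is conventionally a radix-wise softmax (falling back to a sigmoid when radix == 1), as in ResNeSt; a sketch of that assumed definition in MegEngine:

import megengine.functional as F
import megengine.module as M


class RSoftmax(M.Module):
    # Sketch of the assumed `rsoftmax`: normalize the attention logits
    # across the radix groups within each cardinal group.
    def __init__(self, radix, cardinality):
        super().__init__()
        self.radix = radix
        self.cardinality = cardinality

    def forward(self, x):
        batch = x.shape[0]
        if self.radix > 1:
            # (B, C, 1, 1) -> (B, cardinality, radix, C // (cardinality * radix))
            x = x.reshape(batch, self.cardinality, self.radix, -1)
            x = F.transpose(x, (0, 2, 1, 3))
            x = F.softmax(x, axis=1)  # normalize across the radix groups
            x = x.reshape(batch, -1)
        else:
            x = F.sigmoid(x)
        return x

The caller then reshapes the flat output back to (batch, -1, 1, 1), which matches the `.reshape(bs, -1, 1, 1)` seen above.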
Example #4
    def forward(self, x):
        # apply the convolution
        net = self.conv(x)
        if self.use_bn:
            net = self.bn0(net)

        if self.droupblock_prob > 0.0:
            net = self.droupblock(net)  # DropBlock regularization

        net = self.relu(net)
        # split along the channel axis
        batch = net.shape[0]

        if self.radix > 1:
            splits = F.split(net, self.radix, axis=1)
            gap = sum(splits)
        else:
            gap = net
        # calculate the attention
        gap = F.adaptive_avg_pool2d(gap, 1)
        gap = self.fc1(gap)

        if self.use_bn:
            gap = self.bn1(gap)

        atten = self.fc2(gap)
        atten = self.rsoftmax(atten).reshape(batch, -1, 1, 1)

        if self.radix > 1:
            attens = F.split(atten, self.radix, axis=1)

            out = sum([att * split for (att, split) in zip(attens, splits)])
        else:
            out = atten * net  # scale the conv output, not the raw input

        return out
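
Both split-attention examples hinge on `F.split` dividing the channel axis into `radix` equal groups; a quick shape check with toy sizes (the sizes are arbitrary):

import numpy as np
import megengine as mge
import megengine.functional as F

x = mge.tensor(np.zeros((2, 6, 4, 4), dtype="float32"))
parts = F.split(x, 2, axis=1)  # radix = 2 -> two (2, 3, 4, 4) chunks
print([p.shape for p in parts])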
Example #5
        nn.Conv2d(32, 32, 3, 1, 0, groups=32).cuda(),
    ),
    "conv3d": (MM.Conv3d(32, 32, 3, 1, 0), nn.Conv3d(32, 32, 3, 1, 0).cuda()),
    "ConvTranspose2d": (
        MM.ConvTranspose2d(32, 32, 3, 1, 0),
        nn.ConvTranspose2d(32, 32, 3, 1, 0).cuda(),
    ),
    "BatchNorm2d": (MM.BatchNorm2d(64), nn.BatchNorm2d(64).cuda()),
    "Linear": (MM.Linear(1000, 1000), nn.Linear(1000, 1000).cuda()),
}

test_cases = [
    # (name, mge op, torch op, small inps, large inps, unpack_inps, rep)
    (
        "adaptive_avg_pool2d",
        lambda x: MF.adaptive_avg_pool2d(x, (7, 7)),
        lambda x: TF.adaptive_avg_pool2d(x, (7, 7)),
        [(2, 32, 16, 16)],
        [(64, 512, 16, 16)],
        True,
        1000,
    ),
    (
        "adaptive_max_pool2d",
        lambda x: MF.adaptive_max_pool2d(x, (7, 7)),
        lambda x: TF.adaptive_max_pool2d(x, (7, 7)),
        [(2, 32, 16, 16)],
        [(64, 512, 16, 16)],
        True,
        1000,
    ),