    def test_structure(self):
        # test drop_path_rate decay
        cfg = deepcopy(self.cfg)
        cfg['drop_path_rate'] = 0.2
        model = SwinTransformer(**cfg)
        depths = model.arch_settings['depths']
        blocks = chain(*[stage.blocks for stage in model.stages])
        for i, block in enumerate(blocks):
            expect_prob = 0.2 / (sum(depths) - 1) * i
            self.assertAlmostEqual(block.ffn.dropout_layer.drop_prob,
                                   expect_prob)
            self.assertAlmostEqual(block.attn.drop.drop_prob, expect_prob)
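        # The expected value follows a linear ramp over all blocks, i.e.
        # torch.linspace(0, drop_path_rate, sum(depths)): the first block gets
        # 0 and the last block gets the full 0.2 (with the base-arch depths
        # (2, 2, 18, 2) assumed in self.cfg, that is 24 blocks spaced
        # 0.2 / 23 apart).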

        # test Swin-Transformer with norm_eval=True
        cfg = deepcopy(self.cfg)
        cfg['norm_eval'] = True
        cfg['norm_cfg'] = dict(type='BN')
        cfg['stage_cfgs'] = dict(block_cfgs=dict(norm_cfg=dict(type='BN')))
        model = SwinTransformer(**cfg)
        model.init_weights()
        model.train()
        self.assertTrue(check_norm_state(model.modules(), False))
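        # check_norm_state (a helper from the test utilities) is expected to
        # confirm that every norm layer stays in eval mode (module.training is
        # False) even after model.train(), which is the behaviour
        # norm_eval=True should enforce for the BN layers configured above.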

        # test Swin-Transformer with first stage frozen.
        cfg = deepcopy(self.cfg)
        frozen_stages = 0
        cfg['frozen_stages'] = frozen_stages
        cfg['out_indices'] = (0, 1, 2, 3)
        model = SwinTransformer(**cfg)
        model.init_weights()
        model.train()

        # the patch_embed and first stage should not require grad.
        self.assertFalse(model.patch_embed.training)
        for param in model.patch_embed.parameters():
            self.assertFalse(param.requires_grad)
        for i in range(frozen_stages + 1):
            stage = model.stages[i]
            for param in stage.parameters():
                self.assertFalse(param.requires_grad)
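        # norm0 is the extra norm layer registered because stage 0 is in
        # out_indices; since it belongs to the frozen stage, it should be
        # frozen as well.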
        for param in model.norm0.parameters():
            self.assertFalse(param.requires_grad)

        # the remaining stages and their norm layers should require grad.
        for i in range(frozen_stages + 1, 4):
            stage = model.stages[i]
            for param in stage.parameters():
                self.assertTrue(param.requires_grad)
            norm = getattr(model, f'norm{i}')
            for param in norm.parameters():
                self.assertTrue(param.requires_grad)

    def test_forward(self):
        imgs = torch.randn(3, 3, 224, 224)

        cfg = deepcopy(self.cfg)
        model = SwinTransformer(**cfg)
        outs = model(imgs)
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 7, 7))
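        # With a 224x224 input and an overall stride of 32 (4x patch embedding
        # followed by three 2x downsamples), the single output map is 7x7; the
        # 1024 channels match the last-stage width of the base arch assumed in
        # self.cfg.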

        # test with window_size=12
        cfg = deepcopy(self.cfg)
        cfg['window_size'] = 12
        model = SwinTransformer(**cfg)
        outs = model(torch.randn(3, 3, 384, 384))
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 12, 12))
        with self.assertRaisesRegex(AssertionError, r'the window size \(12\)'):
            model(torch.randn(3, 3, 224, 224))
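        # A 384x384 input gives a 12x12 final map that a 12x12 window tiles
        # exactly, while a 224x224 input gives a 7x7 map smaller than the
        # window, hence the AssertionError unless pad_small_map is enabled
        # (covered next).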

        # test with pad_small_map=True
        cfg = deepcopy(self.cfg)
        cfg['window_size'] = 12
        cfg['pad_small_map'] = True
        model = SwinTransformer(**cfg)
        outs = model(torch.randn(3, 3, 224, 224))
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 7, 7))
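        # With pad_small_map=True the small 7x7 map is padded up to the window
        # size for attention instead of raising, so the output shape is
        # unchanged.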

        # test multiple output indices
        cfg = deepcopy(self.cfg)
        cfg['out_indices'] = (0, 1, 2, 3)
        model = SwinTransformer(**cfg)
        outs = model(imgs)
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 4)
        for stride, out in zip([2, 4, 8, 8], outs):
            self.assertEqual(out.shape,
                             (3, 128 * stride, 56 // stride, 56 // stride))
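        # These shapes assume the base arch (embed_dims=128, 56x56 patch grid
        # for a 224 input): each stage's output is taken after its downsample,
        # doubling the stride and the channel width per stage, except the last
        # stage, which has no downsample (hence the repeated stride 8).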

        # test with checkpoint forward
        cfg = deepcopy(self.cfg)
        cfg['with_cp'] = True
        model = SwinTransformer(**cfg)
        for m in model.modules():
            if isinstance(m, SwinBlock):
                self.assertTrue(m.with_cp)
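        # with_cp=True wraps each SwinBlock forward in gradient checkpointing
        # (torch.utils.checkpoint), trading recomputation for memory; the
        # outputs should be unchanged, so the same shape check applies below.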
        model.init_weights()
        model.train()

        outs = model(imgs)
        self.assertIsInstance(outs, tuple)
        self.assertEqual(len(outs), 1)
        feat = outs[-1]
        self.assertEqual(feat.shape, (3, 1024, 7, 7))

        # test with dynamic input shape
        imgs1 = torch.randn(3, 3, 224, 224)
        imgs2 = torch.randn(3, 3, 256, 256)
        imgs3 = torch.randn(3, 3, 256, 309)
        cfg = deepcopy(self.cfg)
        model = SwinTransformer(**cfg)
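        # The backbone pads the patch grid when the input is not divisible by
        # the total stride of 32, so the final resolution should be
        # ceil(H / 32) x ceil(W / 32) for any input size, as asserted below.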
        for imgs in [imgs1, imgs2, imgs3]:
            outs = model(imgs)
            self.assertIsInstance(outs, tuple)
            self.assertEqual(len(outs), 1)
            feat = outs[-1]
            expect_feat_shape = (math.ceil(imgs.shape[2] / 32),
                                 math.ceil(imgs.shape[3] / 32))
            self.assertEqual(feat.shape, (3, 1024, *expect_feat_shape))


# Example #3
def test_forward():
    # Test tiny arch forward
    model = SwinTransformer(arch='Tiny')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    output = model(imgs)
    assert len(output) == 1
    assert output[0].shape == (1, 768, 7, 7)

    # Test small arch forward
    model = SwinTransformer(arch='small')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    output = model(imgs)
    assert len(output) == 1
    assert output[0].shape == (1, 768, 7, 7)

    # Test base arch forward
    model = SwinTransformer(arch='B')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    output = model(imgs)
    assert len(output) == 1
    assert output[0].shape == (1, 1024, 7, 7)

    # Test large arch forward
    model = SwinTransformer(arch='l')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    output = model(imgs)
    assert len(output) == 1
    assert output[0].shape == (1, 1536, 7, 7)
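    # Note: the arch strings above appear to be matched case-insensitively, so
    # 'Tiny', 'small', 'B' and 'l' resolve to the tiny/small/base/large
    # settings, whose last-stage widths (768/768/1024/1536) match the asserted
    # shapes.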

    # Test base arch with window_size=12, img_size=384
    model = SwinTransformer(arch='base',
                            img_size=384,
                            stage_cfgs=dict(block_cfgs=dict(window_size=12)))
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 384, 384)
    output = model(imgs)
    assert len(output) == 1
    assert output[0].shape == (1, 1024, 12, 12)
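    # 384 / 32 = 12, so a 12x12 window tiles the final feature map exactly.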

    # Test base arch with checkpoint forward
    model = SwinTransformer(arch='B', with_cp=True)
    for m in model.modules():
        if isinstance(m, SwinBlock):
            assert m.with_cp
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    output = model(imgs)
    assert len(output) == 1
    assert output[0].shape == (1, 1024, 7, 7)