import pytest
import torch
from torch.nn import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm

from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck


def test_resnest_stem():
    # Test default stem_channels
    model = ResNet(50)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64

    # Test default stem_channels, with base_channels=3
    model = ResNet(50, base_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3

    # Test stem_channels=3
    model = ResNet(50, stem_channels=3)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3

    # Test stem_channels=3, with base_channels=2
    model = ResNet(50, stem_channels=3, base_channels=2)
    assert model.stem_channels == 3
    assert model.conv1.out_channels == 3
    assert model.norm1.num_features == 3
    assert model.layer1[0].conv1.in_channels == 3

    # Test V1d stem_channels
    model = ResNetV1d(depth=50, stem_channels=6)
    model.train()
    assert model.stem[0].out_channels == 3
    assert model.stem[1].num_features == 3
    assert model.stem[3].out_channels == 3
    assert model.stem[4].num_features == 3
    assert model.stem[6].out_channels == 6
    assert model.stem[7].num_features == 6
    assert model.layer1[0].conv1.in_channels == 6
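
# The backbone tests in this file call check_norm_state(), is_block(),
# is_norm() and all_zeros(), which are test-local helpers rather than
# importable mmdet functions. The definitions below are minimal sketches
# consistent with how the tests use them (they rely on the imports above).
def check_norm_state(modules, train_state):
    """Check that every BatchNorm layer is in the expected train/eval state."""
    for mod in modules:
        if isinstance(mod, _BatchNorm):
            if mod.training != train_state:
                return False
    return True


def is_block(module):
    """Check whether a module is a ResNet building block."""
    return isinstance(module, (BasicBlock, Bottleneck))


def is_norm(module):
    """Check whether a module is a normalization layer."""
    return isinstance(module, (GroupNorm, _BatchNorm))


def all_zeros(module):
    """Check whether the weight (and bias, if present) are all zeros."""
    weight_zero = torch.equal(module.weight.data,
                              torch.zeros_like(module.weight.data))
    if getattr(module, 'bias', None) is not None:
        bias_zero = torch.equal(module.bias.data,
                                torch.zeros_like(module.bias.data))
    else:
        bias_zero = True
    return weight_zero and bias_zero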

# Imports assumed by the class below (mmdetection v1.x API:
# builder.build_detector with a test_cfg argument).
import mmcv
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint

from mmdet.models import builder
from mmdet.models.backbones import ResNet
# SelfAttention is a project-specific relation module; import it from
# wherever it is defined in this repository.


class Net3(nn.Module):

    def __init__(self):
        super(Net3, self).__init__()
        cfg = mmcv.Config.fromfile(
            '/home/liuziming/mmdetection/configs/rpn_r50_fpn_1x.py')
        # set cudnn_benchmark
        if cfg.get('cudnn_benchmark', False):
            torch.backends.cudnn.benchmark = True
        cfg.model.pretrained = None
        cfg.data.test.test_mode = True
        self.RPN = builder.build_detector(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        self.backbone = ResNet(50, 4, frozen_stages=1)
        self.init_weights(pretrained='modelzoo://resnet50')
        self.relation = SelfAttention(2, 256, 256, 256)
        self.fc = nn.Linear(256 * 2, 40)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

    def init_weights(self, pretrained=None):
        # nn.Module has no init_weights(), so the super() call is dropped;
        # only the RPN checkpoint and the backbone weights are initialized here.
        load_checkpoint(
            self.RPN,
            '/home/share/LabServer/GLnet/MODELZOO/rpn_r50_fpn_2x_20181010-88a4a471.pth')
        self.backbone.init_weights(pretrained)

    def forward(self, x):
        with torch.no_grad():
            # return_loss=False switches the detector to test mode; the items
            # of x are forwarded to BaseDetector.forward() as keyword arguments.
            result, roi_feats = self.RPN(return_loss=False, rescale=False, **x)
        roi_feats = self.avgpool(roi_feats)
        roi_feats = torch.mean(roi_feats, dim=1).view(roi_feats.size(0), -1)
        assert roi_feats.size(1) == 256
        global_feat = self.backbone(x)
        global_feat = self.avgpool(global_feat).view(global_feat.size(0), -1)
        assert global_feat.size(1) == 256
        # Concatenate the two 256-d features so the dimension matches
        # fc's 256 * 2 input size.
        combine_feat = torch.cat([global_feat, roi_feats], dim=1)
        output = self.fc(combine_feat)
        return output
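
# Rough usage sketch (assumptions flagged): Net3 expects an mmdetection
# v1.x-style test-mode data dict, e.g. x = dict(img=[img_tensor],
# img_meta=[[meta_dict]]), and a detector whose forward() has been modified
# to also return RoI features alongside the proposals.
#
#     net = Net3()
#     net.eval()
#     logits = net(x)   # tensor of shape (batch, 40)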

def test_resnet_backbone():
    """Test resnet backbone."""
    with pytest.raises(KeyError):
        # ResNet depth should be in [18, 34, 50, 101, 152]
        ResNet(20)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)

    with pytest.raises(AssertionError):
        # len(stage_with_dcn) == num_stages
        dcn = dict(type='DCN', deformable_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))

    with pytest.raises(AssertionError):
        # len(stage_with_plugin) == num_stages
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)

    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)

    with pytest.raises(TypeError):
        # pretrained must be a string path
        model = ResNet(50)
        model.init_weights(pretrained=0)

    with pytest.raises(AssertionError):
        # Style must be in ['pytorch', 'caffe']
        ResNet(50, style='tensorflow')

    # Test ResNet50 norm_eval=True
    model = ResNet(50, norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test ResNet50 with torchvision pretrained weight
    model = ResNet(depth=50, norm_eval=True)
    model.init_weights('torchvision://resnet50')
    model.train()
    assert check_norm_state(model.modules(), False)

    # Test ResNet50 with first stage frozen
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    assert model.norm1.training is False
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test ResNet50V1d with first stage frozen
    model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
    assert len(model.stem) == 9
    model.init_weights()
    model.train()
    check_norm_state(model.stem, False)
    for param in model.stem.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, f'layer{i}')
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # Test ResNet18 forward
    model = ResNet(18)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 56, 56])
    assert feat[1].shape == torch.Size([1, 128, 28, 28])
    assert feat[2].shape == torch.Size([1, 256, 14, 14])
    assert feat[3].shape == torch.Size([1, 512, 7, 7])

    # Test ResNet18 with checkpoint forward
    model = ResNet(18, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp

    # Test ResNet50 with BatchNorm forward
    model = ResNet(50)
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50 with layers 1, 2, 3 out forward
    model = ResNet(50, out_indices=(0, 1, 2))
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])

    # Test ResNet50 with checkpoint forward
    model = ResNet(50, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50 with GroupNorm forward
    model = ResNet(
        50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50 with GeneralizedAttention after conv2 in layers 2-4,
    # NonLocal2D after conv2 in all layers, and ContextBlock after conv3
    # in layers 2-3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2D'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 64
    for m in model.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 128
            assert m.gen_attention_block.in_channels == 128
            assert m.context_block.in_channels == 512
    for m in model.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 256
            assert m.gen_attention_block.in_channels == 256
            assert m.context_block.in_channels == 1024
    for m in model.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 512
            assert m.gen_attention_block.in_channels == 512
            assert not hasattr(m, 'context_block')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50 with 2 ContextBlocks after conv3 in layers 2-3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 512
            assert m.context_block2.in_channels == 512
    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 1024
            assert m.context_block2.in_channels == 1024
    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50 zero initialization of residual
    model = ResNet(50, zero_init_residual=True)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert all_zeros(m.norm2)
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNetV1d forward
    model = ResNetV1d(depth=50)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50 stem_channels
    model = ResNet(depth=50, stem_channels=128)
    model.init_weights()
    model.train()
    assert model.conv1.out_channels == 128
    assert model.layer1[0].conv1.in_channels == 128

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # Test ResNet50V1d stem_channels
    model = ResNetV1d(depth=50, stem_channels=128)
    model.init_weights()
    model.train()
    assert model.stem[0].out_channels == 64
    assert model.stem[3].out_channels == 64
    assert model.stem[6].out_channels == 128
    assert model.layer1[0].conv1.in_channels == 128

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
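
# Note on the plugin channel assertions above: in a ResNet-50 Bottleneck the
# mid ("planes") channels are 64/128/256/512 for layer1-layer4, and conv3
# expands them by a factor of 4. Plugins placed 'after_conv2' therefore see
# the planes channels, while plugins placed 'after_conv3' see
# planes * 4 (e.g. 512 in layer2, 1024 in layer3).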

def test_resnest_stem():
    # Test default stem_channels
    model = ResNet(50)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64

    # Test default stem_channels, with base_channels=32
    model = ResNet(50, base_channels=32)
    assert model.stem_channels == 32
    assert model.conv1.out_channels == 32
    assert model.norm1.num_features == 32
    assert model.layer1[0].conv1.in_channels == 32

    # Test stem_channels=64
    model = ResNet(50, stem_channels=64)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64
    assert model.layer1[0].conv1.in_channels == 64

    # Test stem_channels=64, with base_channels=32
    model = ResNet(50, stem_channels=64, base_channels=32)
    assert model.stem_channels == 64
    assert model.conv1.out_channels == 64
    assert model.norm1.num_features == 64
    assert model.layer1[0].conv1.in_channels == 64

    # Test stem_channels=128
    model = ResNet(depth=50, stem_channels=128)
    model.init_weights()
    model.train()
    assert model.conv1.out_channels == 128
    assert model.layer1[0].conv1.in_channels == 128

    # Test V1d stem_channels
    model = ResNetV1d(depth=50, stem_channels=128)
    model.init_weights()
    model.train()
    assert model.stem[0].out_channels == 64
    assert model.stem[1].num_features == 64
    assert model.stem[3].out_channels == 64
    assert model.stem[4].num_features == 64
    assert model.stem[6].out_channels == 128
    assert model.stem[7].num_features == 128
    assert model.layer1[0].conv1.in_channels == 128
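
# The stem[...] indices asserted above follow ResNetV1d's deep stem: an
# nn.Sequential of three conv-BN-ReLU groups (9 modules in total), where the
# first two convolutions use stem_channels // 2 output channels and the last
# one uses stem_channels. mmdet builds the real stem via
# build_conv_layer/build_norm_layer; the plain-PyTorch sketch below (with the
# hypothetical helper name make_v1d_stem) is only illustrative of that layout.
import torch.nn as nn


def make_v1d_stem(in_channels=3, stem_channels=128):
    mid = stem_channels // 2
    return nn.Sequential(
        nn.Conv2d(in_channels, mid, 3, stride=2, padding=1, bias=False),    # stem[0]
        nn.BatchNorm2d(mid),                                                # stem[1]
        nn.ReLU(inplace=True),                                              # stem[2]
        nn.Conv2d(mid, mid, 3, stride=1, padding=1, bias=False),            # stem[3]
        nn.BatchNorm2d(mid),                                                # stem[4]
        nn.ReLU(inplace=True),                                              # stem[5]
        nn.Conv2d(mid, stem_channels, 3, stride=1, padding=1, bias=False),  # stem[6]
        nn.BatchNorm2d(stem_channels),                                      # stem[7]
        nn.ReLU(inplace=True),                                              # stem[8]
    )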