Example #1
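This is the layer setup of a guided-anchoring detection head (the pattern used by MMDetection's GuidedAnchorHead): 1x1 convs predict anchor locations and shapes, a deformable FeatureAdaption module reshapes the features accordingly, and MaskedConv2d heads produce classification and regression outputs only at the locations that survive filtering.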
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        # Anchor location (objectness) and anchor shape ((w, h)) branches.
        self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1)
        # Deformable conv that adapts features to the predicted anchor shapes.
        self.feature_adaption = FeatureAdaption(
            self.in_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        # Final heads run only at locations kept by the anchor location mask.
        self.conv_cls = MaskedConv2d(self.feat_channels,
                                     self.num_anchors * self.cls_out_channels,
                                     1)
        self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
                                     1)
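For context, a minimal sketch of how these layers are typically wired in a forward pass under the guided-anchoring scheme. The exact wiring and the loc_filter_thr threshold follow that pattern but are assumptions here, not part of the example above:

    def forward_single(self, x):
        loc_pred = self.conv_loc(x)      # per-pixel anchor objectness logits
        shape_pred = self.conv_shape(x)  # per-anchor (w, h) shape prediction
        x = self.feature_adaption(x, shape_pred)
        # At inference, build a binary mask from the location scores so the
        # masked convs skip low-scoring positions; training runs densely
        # with mask=None.
        mask = None
        if not self.training:
            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
        cls_score = self.conv_cls(x, mask)
        bbox_pred = self.conv_reg(x, mask)
        return cls_score, shape_pred, loc_pred, bbox_pred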
Example #2
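A RetinaNet-style variant of the same idea, matching the layout of MMDetection's GARetinaHead: stacked ConvModule towers first refine the features, then separate feature-adaption and masked-conv branches handle classification and regression.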
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))

        # Anchor location (objectness) and anchor shape branches.
        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
        self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
                                    1)
        # Each branch gets its own deformable feature adaption module.
        self.feature_adaption_cls = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        self.feature_adaption_reg = FeatureAdaption(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deform_groups=self.deform_groups)
        # Final masked convs run only where the anchor location mask is set.
        self.retina_cls = MaskedConv2d(self.feat_channels,
                                       self.num_base_priors *
                                       self.cls_out_channels,
                                       3,
                                       padding=1)
        self.retina_reg = MaskedConv2d(self.feat_channels,
                                       self.num_base_priors * 4,
                                       3,
                                       padding=1)
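Compared with Example #1, this head gives the classification and regression branches their own FeatureAdaption modules instead of one shared module, at the cost of a second deformable conv per level. The mixed num_anchors / num_base_priors naming is consistent in recent MMDetection versions, where num_anchors is kept as a deprecated alias of num_base_priors.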
Example #3
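A minimal smoke test for the underlying mmcv op, close to the one in mmcv's own test suite (a module-level import torch is assumed, as it is there):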
    def test_masked_conv2d(self):
        if not torch.cuda.is_available():
            # Skip (rather than silently pass) when no GPU is available.
            import pytest
            pytest.skip('MaskedConv2d requires a CUDA build of mmcv')
        from mmcv.ops import MaskedConv2d
        input = torch.randn(1, 3, 16, 16, requires_grad=True, device='cuda')
        # Only positions where the mask is positive are convolved; a random
        # float mask exercises both active and skipped locations.
        mask = torch.randn(1, 16, 16, requires_grad=True, device='cuda')
        conv = MaskedConv2d(3, 3, 3).cuda()
        output = conv(input, mask)
        assert output is not None
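For a quick check outside the test suite, a standalone usage sketch (assuming mmcv is installed with its CUDA ops; passing mask=None makes MaskedConv2d fall back to a plain nn.Conv2d, so that path also runs on CPU):

    import torch
    from mmcv.ops import MaskedConv2d

    conv = MaskedConv2d(3, 8, 3, padding=1)
    x = torch.randn(1, 3, 16, 16)

    # Dense fallback: with mask=None this is an ordinary convolution.
    dense_out = conv(x, None)
    assert dense_out.shape == (1, 8, 16, 16)

    if torch.cuda.is_available():
        conv, x = conv.cuda(), x.cuda()
        # Positive entries in the (1, H, W) mask mark the computed pixels.
        mask = (torch.rand(1, 16, 16, device='cuda') > 0.5).float()
        sparse_out = conv(x, mask)
        assert sparse_out.shape == dense_out.shape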