def _init_layers(self):
     """Initialize layers of the head."""
     self.relu = nn.ReLU(inplace=True)
     self.cls_convs = nn.ModuleList()
     self.reg_convs = nn.ModuleList()
     for i in range(self.stacked_convs):
         chn = self.in_channels if i == 0 else self.feat_channels
         self.cls_convs.append(
             ConvModule(chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
         self.reg_convs.append(
             ConvModule(chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
     assert self.num_anchors == 1, "anchor free version"
     self.gfl_cls = nn.Conv2d(self.feat_channels,
                              self.cls_out_channels,
                              3,
                              padding=1)
     self.gfl_reg = nn.Conv2d(self.feat_channels,
                              4 * (self.reg_max + 1),
                              3,
                              padding=1)
     self.scales = nn.ModuleList(
         [Scale(1.0) for _ in self.anchor_generator.strides])
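Every head in this listing wraps its per-level regression output in the same `Scale` block, a single learnable scalar from `mmcv.cnn` that lets each FPN level rescale its predictions independently. A minimal sketch of what that module amounts to (the real implementation lives in mmcv and may differ in detail):

import torch
import torch.nn as nn


class Scale(nn.Module):
    """A single learnable scalar that multiplies its input."""

    def __init__(self, scale=1.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))

    def forward(self, x):
        return x * self.scale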
Example #2
 def __init__(self, in_channels):
     super(CrissCrossAttention, self).__init__()
     self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
     self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
     self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
     self.gamma = Scale(0.)
     self.in_channels = in_channels
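The forward pass is not shown here, but in CCNet-style attention the zero-initialized `gamma` typically gates the aggregated attention response before a residual add, so the block starts out as an identity mapping. A small runnable illustration of that gating idea (the tensor names are stand-ins, not the module's real forward):

import torch
from mmcv.cnn import Scale

gamma = Scale(0.)                # zero-initialized gate, as in the __init__ above
x = torch.randn(1, 64, 8, 8)     # residual input
attn_out = torch.randn_like(x)   # stand-in for the aggregated attention response
y = gamma(attn_out) + x          # at initialization this returns x unchanged
assert torch.allclose(y, x)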
Example #3
    def _init_layers(self):
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if (self.dcn_on_last_conv and i == self.stacked_convs - 1):
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=conv_cfg,
                           norm_cfg=self.norm_cfg,
                           bias=self.conv_bias))
            self.reg_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=conv_cfg,
                           norm_cfg=self.norm_cfg,
                           bias=self.conv_bias))
        self.fcos_cls = nn.Conv2d(self.feat_channels,
                                  self.cls_out_channels,
                                  3,
                                  padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)

        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
Example #4
def test_fcos_head_forward_single():
    """Test fcos forward single in torch and ort env."""
    fcos_model = fcos_config()

    feat = torch.rand(1, fcos_model.in_channels, 32, 32)
    fcos_model.forward_single = partial(fcos_model.forward_single,
                                        scale=Scale(1.0).requires_grad_(False),
                                        stride=(4, ))
    ort_validate(fcos_model.forward_single, feat)
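The test above freezes a `Scale(1.0)` and binds it, together with a stride, into `forward_single` before validating against ONNX Runtime. In FCOS-style heads the per-level scale is applied to the raw box regression; a simplified, self-contained sketch of that common pattern (illustrative layer names, not the exact upstream code):

import torch
import torch.nn as nn
from mmcv.cnn import Scale

conv_reg = nn.Conv2d(256, 4, 3, padding=1)   # stand-in for the head's reg output conv
scale, stride = Scale(1.0), 4

reg_feat = torch.rand(1, 256, 32, 32)
bbox_pred = scale(conv_reg(reg_feat)).float()   # learnable per-level rescaling
bbox_pred = bbox_pred.clamp(min=0) * stride     # one common mapping to pixel units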
Example #5
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.mlvl_cls_convs = nn.ModuleList()
        self.mlvl_reg_convs = nn.ModuleList()
        self.mlvl_atss_cls_convs = nn.ModuleList()
        self.mlvl_atss_reg_convs = nn.ModuleList()
        self.mlvl_atss_centerness_convs = nn.ModuleList()

        for level in range(self.num_out):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                chn = self.in_channels if i == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(chn,
                               self.feat_channels,
                               3,
                               stride=1,
                               padding=1,
                               conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg))
                reg_convs.append(
                    ConvModule(chn,
                               self.feat_channels,
                               3,
                               stride=1,
                               padding=1,
                               conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg))

            atss_cls = nn.Conv2d(self.feat_channels,
                                 self.num_anchors * self.cls_out_channels,
                                 1,
                                 padding=0)
            atss_reg = nn.Conv2d(self.feat_channels,
                                 self.num_anchors * 4,
                                 1,
                                 padding=0)
            atss_centerness = nn.Conv2d(self.feat_channels,
                                        self.num_anchors * 1,
                                        1,
                                        padding=0)

            self.mlvl_cls_convs.append(cls_convs)
            self.mlvl_reg_convs.append(reg_convs)
            self.mlvl_atss_cls_convs.append(atss_cls)
            self.mlvl_atss_reg_convs.append(atss_reg)
            self.mlvl_atss_centerness_convs.append(atss_centerness)

        if self.scale:
            self.scales = nn.ModuleList([
                Scale(float(self.scale)) for _ in self.anchor_generator.strides
            ])
        else:
            self.scales = nn.ModuleList(
                [nn.Identity() for _ in self.anchor_generator.strides])
Example #6
 def _init_layers(self):
     """Initialize layers of the head."""
     super()._init_layers()
     self.conv_centerness_prev = self._init_branch(
         conv_channels=self.centerness_branch,
         conv_strides=(1, ) * len(self.centerness_branch))
     self.conv_centerness = nn.Conv2d(self.centerness_branch[-1], 1, 1)
     self.scales = nn.ModuleList([
         nn.ModuleList([Scale(1.0) for _ in range(3)]) for _ in self.strides
     ])  # only for offset, depth and size regression
Example #7
    def _init_layers(self):
        self.cls_convs = nn.ModuleList()
        self.wh_convs = nn.ModuleList()
        self.offset_convs = nn.ModuleList()
        self.rot_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
            self.wh_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
            self.offset_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
            self.rot_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))

        self.center_hm = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)
        self.center_wh = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
        self.center_offset = nn.Conv2d(self.feat_channels, 2, 3, padding=1, bias=True)
        self.center_rot = nn.Conv2d(self.feat_channels, 1, 3, padding=1, bias=True)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
Example #8
 def _init_layers(self):
     """Initialize layers of the head."""
     super()._init_layers()
     #self._init_reid_convs()
     self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
     #self.conv_reid = nn.Conv2d(self.feat_channels, self.feat_channels, 3, padding=1)
     self.conv_reid = DeformConv2dPack(self.feat_channels, self.feat_channels, 3, 1, 1)
     #num_person = 359
     num_person = 5532
     #self.classifier_reid = nn.Linear(self.feat_channels, num_person)
     self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
     self.labeled_matching_layer = LabeledMatchingLayer(num_persons=num_person) # for mot17half
     self.unlabeled_matching_layer = UnlabeledMatchingLayer()
Example #9
    def _init_layers(self):
        """Initialize layers of the head."""
        super(FCOSHead, self)._init_cls_convs()
        super(FCOSHead, self)._init_reg_convs()
        self.relu = nn.ReLU(inplace=True)
        self.vfnet_reg_conv = ConvModule(self.feat_channels,
                                         self.feat_channels,
                                         3,
                                         stride=1,
                                         padding=1,
                                         conv_cfg=self.conv_cfg,
                                         norm_cfg=self.norm_cfg,
                                         bias=self.conv_bias)
        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.vfnet_cls = nn.Conv2d(self.feat_channels,
                                   self.cls_out_channels,
                                   3,
                                   padding=1)
Example #10
    def _init_layers(self):
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg,
                           bias=self.norm_cfg is None))
            self.reg_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg,
                           bias=self.norm_cfg is None))
        self.fcos_cls = nn.Conv2d(self.feat_channels,
                                  self.cls_out_channels,
                                  3,
                                  padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 34, 3, padding=1)
        self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
        self.scales_1 = nn.ModuleList([Scale(1.0) for _ in self.strides])
        self.scales_2 = nn.ModuleList([Scale(1.0) for _ in self.strides])

        self.feature_adaption = FeatureAdaption(256,
                                                256,
                                                kernel_size=3,
                                                deform_groups=4)
        self.fcos_refine_out = nn.Conv2d(self.feat_channels, 34, 3, padding=1)
Example #11
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.inter_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            if i < self.num_dcn:
                conv_cfg = dict(type='DCNv2', deform_groups=4)
            else:
                conv_cfg = self.conv_cfg
            chn = self.in_channels if i == 0 else self.feat_channels
            self.inter_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=conv_cfg,
                           norm_cfg=self.norm_cfg))

        self.cls_decomp = TaskDecomposition(self.feat_channels,
                                            self.stacked_convs,
                                            self.stacked_convs * 8,
                                            self.conv_cfg, self.norm_cfg)
        self.reg_decomp = TaskDecomposition(self.feat_channels,
                                            self.stacked_convs,
                                            self.stacked_convs * 8,
                                            self.conv_cfg, self.norm_cfg)

        self.tood_cls = nn.Conv2d(self.feat_channels,
                                  self.num_base_priors * self.cls_out_channels,
                                  3,
                                  padding=1)
        self.tood_reg = nn.Conv2d(self.feat_channels,
                                  self.num_base_priors * 4,
                                  3,
                                  padding=1)

        self.cls_prob_module = nn.Sequential(
            nn.Conv2d(self.feat_channels * self.stacked_convs,
                      self.feat_channels // 4, 1), nn.ReLU(inplace=True),
            nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1))
        self.reg_offset_module = nn.Sequential(
            nn.Conv2d(self.feat_channels * self.stacked_convs,
                      self.feat_channels // 4, 1), nn.ReLU(inplace=True),
            nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1))

        self.scales = nn.ModuleList(
            [Scale(1.0) for _ in self.prior_generator.strides])
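`cls_prob_module` and `reg_offset_module` expect `feat_channels * stacked_convs` input channels because, in TOOD, the outputs of all `inter_convs` are concatenated along the channel axis before being reduced to a per-location classification probability and per-side offsets. A rough sketch of that wiring (illustrative shapes, not the exact upstream forward):

import torch

feat_channels, stacked_convs = 256, 6
# inter_feats stands in for the per-conv outputs collected from self.inter_convs
inter_feats = [torch.rand(1, feat_channels, 32, 32) for _ in range(stacked_convs)]
feat = torch.cat(inter_feats, dim=1)      # (1, feat_channels * stacked_convs, 32, 32)
# cls_prob = self.cls_prob_module(feat)   # -> (1, 1, 32, 32)
# offsets  = self.reg_offset_module(feat) # -> (1, 8, 32, 32), (dx, dy) per box side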
Example #12
 def _init_layers(self):
     """Initialize layers of the head."""
     super()._init_layers()
     #self._init_reid_convs()
     self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
     #self.conv_reid = nn.Conv2d(self.feat_channels, self.feat_channels, 3, padding=1)
     num_person = 483
     # num_person = 5532
     queue_size = 500
     # queue_size = 5000
     #self.classifier_reid = nn.Linear(self.feat_channels, num_person)
     self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
     self.labeled_matching_layer = LabeledMatchingLayerQueue(
         num_persons=num_person, feat_len=self.in_channels)  # for mot17half
     self.unlabeled_matching_layer = UnlabeledMatchingLayer(
         queue_size=queue_size, feat_len=self.in_channels)
Example #13
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=dict(type='DCN', deform_groups=1)
                           if i == 0 and self.use_dcn else self.conv_cfg,
                           norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=dict(type='DCN', deform_groups=1)
                           if i == 0 and self.use_dcn else self.conv_cfg,
                           norm_cfg=self.norm_cfg))
        self.atss_cls = nn.Conv2d(self.feat_channels,
                                  self.num_base_priors * self.cls_out_channels,
                                  3,
                                  padding=1)
        self.atss_reg = nn.Conv2d(self.feat_channels,
                                  self.num_base_priors * 4,
                                  3,
                                  padding=1)
        self.atss_iou = nn.Conv2d(self.feat_channels,
                                  self.num_base_priors * 1,
                                  3,
                                  padding=1)
        self.scales = nn.ModuleList(
            [Scale(1.0) for _ in self.prior_generator.strides])

        # we use the global list in loss
        self.cls_num_pos_samples_per_level = [
            0. for _ in range(len(self.prior_generator.strides))
        ]
        self.reg_num_pos_samples_per_level = [
            0. for _ in range(len(self.prior_generator.strides))
        ]
Example #14
 def _init_layers(self):
     """Initialize layers of the head."""
     self.relu = nn.ReLU(inplace=True)
     conv_strides = [0] if self.strides_share else self.anchor_generator.strides
     self.cls_stride_convs = nn.ModuleDict()
     self.reg_stride_convs = nn.ModuleDict()
     self.stride_cls = nn.ModuleDict()
     self.stride_reg = nn.ModuleDict()
     if self.use_kps:
         self.stride_kps = nn.ModuleDict()
     for stride_idx, conv_stride in enumerate(conv_strides):
         #print('create convs for stride:', conv_stride)
         key = str(conv_stride)
         cls_convs = nn.ModuleList()
         reg_convs = nn.ModuleList()
         stacked_convs = self.stacked_convs[stride_idx] if isinstance(self.stacked_convs, (list, tuple)) else self.stacked_convs
         feat_mult = self.feat_mults[stride_idx] if self.feat_mults is not None else 1
         feat_ch = int(self.feat_channels*feat_mult)
         for i in range(stacked_convs):
             chn = self.in_channels if i == 0 else last_feat_ch
             cls_convs.append( self._get_conv_module(chn, feat_ch) )
             if not self.cls_reg_share:
                 reg_convs.append( self._get_conv_module(chn, feat_ch) )
             last_feat_ch = feat_ch
         self.cls_stride_convs[key] = cls_convs
         self.reg_stride_convs[key] = reg_convs
         self.stride_cls[key] = nn.Conv2d(
             feat_ch, self.cls_out_channels * self.num_anchors, 3, padding=1)
         if not self.use_dfl:
             self.stride_reg[key] = nn.Conv2d(
                 feat_ch, 4 * self.num_anchors, 3, padding=1)
         else:
             self.stride_reg[key] = nn.Conv2d(
                 feat_ch, 4 * (self.reg_max + 1) * self.num_anchors, 3, padding=1)
         if self.use_kps:
             self.stride_kps[key] = nn.Conv2d(
                 feat_ch, self.NK*2*self.num_anchors, 3, padding=1)
     #assert self.num_anchors == 1, 'anchor free version'
     #extra_gflops /= 1e9
     #print('extra_gflops: %.6fG'%extra_gflops)
     if self.use_scale:
         self.scales = nn.ModuleList(
             [Scale(1.0) for _ in self.anchor_generator.strides])
     else:
         self.scales = [None for _ in self.anchor_generator.strides]
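Since the conv stacks are stored in `ModuleDict`s keyed by stride (or by `'0'` when `strides_share` is enabled), the forward pass presumably looks up the right stack per level by string key. A toy, runnable illustration of that lookup (not the module's actual forward):

import torch
import torch.nn as nn

strides_share = False
cls_stride_convs = nn.ModuleDict({
    str(s): nn.ModuleList([nn.Conv2d(64, 64, 3, padding=1) for _ in range(2)])
    for s in (8, 16, 32)
})
x, stride = torch.rand(1, 64, 40, 40), 16
key = '0' if strides_share else str(stride)   # shared weights live under key '0'
for conv in cls_stride_convs[key]:
    x = conv(x)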
Example #15
    def _init_layers(self):
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for _ in self.anchor_generator.strides:
            cls_convs, reg_convs = self._build_not_shared_head()
            self.cls_convs.append(cls_convs)
            self.reg_convs.append(reg_convs)
        self.scales = nn.ModuleList(
            [Scale(1.0) for _ in self.anchor_generator.strides])
        self.gfl_cls = nn.ModuleList([
            nn.Conv2d(self.feat_channels,
                      (self.cls_out_channels + 4 * (self.reg_max + 1)
                       if self.share_cls_reg else self.cls_out_channels),
                      1,
                      padding=0) for _ in self.anchor_generator.strides
        ])

        self.gfl_reg = nn.ModuleList([
            nn.Conv2d(self.feat_channels,
                      4 * (self.reg_max + 1),
                      1,
                      padding=0) for _ in self.anchor_generator.strides
        ])
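When `share_cls_reg` is true, the single per-level `gfl_cls` conv emits classification logits and DFL regression bins together, so the forward pass has to split its output along the channel axis. A hedged sketch of that split, assuming the usual layout of `cls_out_channels` followed by `4 * (reg_max + 1)` channels:

import torch

cls_out_channels, reg_max = 80, 7
out = torch.rand(1, cls_out_channels + 4 * (reg_max + 1), 40, 40)  # shared head output
cls_score, bbox_pred = out.split([cls_out_channels, 4 * (reg_max + 1)], dim=1)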
Example #16
 def _init_layers(self):
     """Initialize layers of the head."""
     self.relu = nn.ReLU(inplace=True)
     self.cls_convs = nn.ModuleList()
     self.reg_convs = nn.ModuleList()
     for i in range(self.stacked_convs):
         chn = self.in_channels if i == 0 else self.feat_channels
         self.cls_convs.append(
             ConvModule(
                 chn,
                 self.feat_channels,
                 3,
                 stride=1,
                 padding=1,
                 conv_cfg=self.conv_cfg,
                 norm_cfg=self.norm_cfg))
         self.reg_convs.append(
             ConvModule(
                 chn,
                 self.feat_channels,
                 3,
                 stride=1,
                 padding=1,
                 conv_cfg=self.conv_cfg,
                 norm_cfg=self.norm_cfg))
     pred_pad_size = self.pred_kernel_size // 2
     self.atss_cls = nn.Conv2d(
         self.feat_channels,
         self.num_anchors * self.cls_out_channels,
         self.pred_kernel_size,
         padding=pred_pad_size)
     self.atss_reg = nn.Conv2d(
         self.feat_channels,
         self.num_base_priors * 4,
         self.pred_kernel_size,
         padding=pred_pad_size)
     self.atss_centerness = nn.Conv2d(
         self.feat_channels,
         self.num_base_priors * 1,
         self.pred_kernel_size,
         padding=pred_pad_size)
     self.scales = nn.ModuleList(
         [Scale(1.0) for _ in self.prior_generator.strides])
Example #17
    def __init__(self, in_channels, channels):
        super(PAM, self).__init__(key_in_channels=in_channels,
                                  query_in_channels=in_channels,
                                  channels=channels,
                                  out_channels=in_channels,
                                  share_key_query=False,
                                  query_downsample=None,
                                  key_downsample=None,
                                  key_query_num_convs=1,
                                  key_query_norm=False,
                                  value_out_num_convs=1,
                                  value_out_norm=False,
                                  matmul_norm=False,
                                  with_out=False,
                                  conv_cfg=None,
                                  norm_cfg=None,
                                  act_cfg=None)

        self.gamma = Scale(0)
Example #18
    def _init_layers(self):
        """Initialize layers of the head."""
        dconv3x3_config = dict(type='DCNv2',
                               kernel_size=3,
                               use_bias=True,
                               deformable_groups=2,
                               padding=1)
        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
        conv1x1_config = dict(type='Conv', kernel_size=1)

        self.arch_config = [
            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
        ]
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i, op_ in enumerate(self.arch_config):
            op = copy.deepcopy(op_)
            chn = self.in_channels if i == 0 else self.feat_channels
            assert isinstance(op, dict)
            use_bias = op.pop('use_bias', False)
            padding = op.pop('padding', 0)
            kernel_size = op.pop('kernel_size')
            module = ConvModule(chn,
                                self.feat_channels,
                                kernel_size,
                                stride=1,
                                padding=padding,
                                norm_cfg=self.norm_cfg,
                                bias=use_bias,
                                conv_cfg=op)

            self.cls_convs.append(copy.deepcopy(module))
            self.reg_convs.append(copy.deepcopy(module))

        self.fcos_cls = nn.Conv2d(self.feat_channels,
                                  self.cls_out_channels,
                                  3,
                                  padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)

        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
Example #19
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))
            self.reg_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))
        assert self.num_anchors == 1, 'anchor free version'
        if self.one_more_cls_out_channels:
            self.cls_out_channels += 1
        self.gfl_cls = nn.Conv2d(self.feat_channels,
                                 self.cls_out_channels,
                                 3,
                                 padding=1)
        self.gfl_reg = nn.Conv2d(self.feat_channels,
                                 4 * (self.reg_max + 1),
                                 3,
                                 padding=1)
        self.scales = nn.ModuleList(
            [Scale(1.0) for _ in self.prior_generator.strides])

        if self.use_dgqp:
            conf_vector = [nn.Conv2d(4 * self.total_dim, self.reg_channels, 1)]
            conf_vector += [self.relu]
            conf_vector += [nn.Conv2d(self.reg_channels, 1, 1), nn.Sigmoid()]
            self.reg_conf = nn.Sequential(*conf_vector)
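`reg_conf` here is a Distribution-Guided Quality Predictor in the style of GFLv2: it consumes top-k statistics of each side's distance distribution (hence the `4 * self.total_dim` input channels) and outputs a scalar that modulates the classification score. A sketch of the usual wiring with illustrative values for `reg_topk` (the exact upstream forward may differ):

import torch
import torch.nn.functional as F

N, H, W = 1, 40, 40
reg_max, reg_topk = 16, 4
total_dim = reg_topk + 1                             # top-k probabilities plus their mean

bbox_pred = torch.rand(N, 4 * (reg_max + 1), H, W)   # raw DFL logits from gfl_reg
prob = F.softmax(bbox_pred.reshape(N, 4, reg_max + 1, H, W), dim=2)
prob_topk, _ = prob.topk(reg_topk, dim=2)
stat = torch.cat([prob_topk, prob_topk.mean(dim=2, keepdim=True)], dim=2)
stat = stat.reshape(N, 4 * total_dim, H, W)
# quality = self.reg_conf(stat)
# cls_score = self.gfl_cls(cls_feat).sigmoid() * quality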
Example #20
 def _init_layers(self):
     """Initialize layers of the head."""
     self.relu = nn.ReLU(inplace=True)
     self.cls_convs = nn.ModuleList()
     self.reg_convs = nn.ModuleList()
     for i in range(self.stacked_convs):
         chn = self.in_channels if i == 0 else self.feat_channels
         if self.dcn_on_last_conv and i == self.stacked_convs - 1:
             conv_cfg = dict(type='DCNv2')
         else:
             conv_cfg = self.conv_cfg
         self.cls_convs.append(
             ConvModule(chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=self.norm_cfg))
         self.reg_convs.append(
             ConvModule(chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=conv_cfg,
                        norm_cfg=self.norm_cfg))
     self.atss_cls = nn.Conv2d(self.feat_channels,
                               self.num_anchors * self.cls_out_channels,
                               3,
                               padding=1)
     self.atss_reg = nn.Conv2d(self.feat_channels,
                               self.num_anchors * 4,
                               3,
                               padding=1)
     self.atss_centerness = nn.Conv2d(self.feat_channels,
                                      self.num_anchors * 1,
                                      3,
                                      padding=1)
     self.scales = nn.ModuleList(
         [Scale(1.0) for _ in self.anchor_generator.strides])
Example #21
 def _init_layers(self):
     self.relu = nn.ReLU(inplace=True)
     self.cls_convs = nn.ModuleList()
     self.reg_convs = nn.ModuleList()
     for i in range(self.stacked_convs):
         chn = self.in_channels if i == 0 else self.feat_channels
         self.cls_convs.append(
             ConvModule(chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        bias=self.norm_cfg is None))
         self.reg_convs.append(
             ConvModule(chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        bias=self.norm_cfg is None))
     self.fcos_cls = nn.Conv2d(self.feat_channels,
                               self.cls_out_channels,
                               3,
                               padding=1)
     self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
     self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
     self.fcos_offset_conv = nn.Conv2d(self.feat_channels,
                                       self.feat_channels,
                                       3,
                                       padding=1)
     self.fcos_offset = nn.Conv2d(self.feat_channels, 18, 1, 1, 0)
     self.fcos_cls_conv = DeformConv(self.feat_channels, self.feat_channels,
                                     3, 1, 1)
     self.result = nn.Conv2d(4, 1, 3, padding=1)
     self.norm = nn.BatchNorm2d(1)
     self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
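`fcos_offset` predicts 18 channels because a 3 x 3 deformable conv needs a 2-D offset per kernel sample (2 * 3 * 3 = 18). The forward is not shown, but the offsets presumably come from the regression branch and steer the deformable classification conv; a hedged, self-contained sketch using mmcv's `DeformConv2d` (may require a CUDA build of the mmcv ops):

import torch
import torch.nn as nn
from mmcv.ops import DeformConv2d

feat_channels = 256
offset_conv = nn.Conv2d(feat_channels, feat_channels, 3, padding=1)
offset_pred = nn.Conv2d(feat_channels, 18, 1)               # 2 * 3 * 3 offset channels
deform_cls_conv = DeformConv2d(feat_channels, feat_channels, 3, 1, 1)

reg_feat = torch.rand(1, feat_channels, 32, 32)
cls_feat = torch.rand(1, feat_channels, 32, 32)
offset = offset_pred(torch.relu(offset_conv(reg_feat)))
cls_feat = torch.relu(deform_cls_conv(cls_feat, offset))    # offsets guide the sampling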
Example #22
    def _init_layers(self):
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels

            self.cls_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))

            self.reg_convs.append(
                ConvModule(chn,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           conv_cfg=self.conv_cfg,
                           norm_cfg=self.norm_cfg))

        self.atss_cls = nn.Conv2d(self.feat_channels,
                                  self.num_anchors * self.cls_out_channels,
                                  3,
                                  padding=1)

        self.atss_reg = nn.Conv2d(self.feat_channels,
                                  self.num_anchors * 5,
                                  3,
                                  padding=1)
        self.atss_centerness = nn.Conv2d(self.feat_channels,
                                         self.num_anchors * 1,
                                         3,
                                         padding=1)
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.anchor_strides])
Example #23
    def _init_layers(self):
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()

        for i in range(self.stacked_convs):  # 4 * convs
            c = self.in_channels if i == 0 else self.feat_channels  # d1: 88
            # classification convolutional layers
            # waiting add....
            self.cls_convs.append(
                ConvModule(
                    c,  # input channels
                    self.feat_channels,  # output channels, spatial size kept the same
                    3,  # 3 x 3 kernel size
                    stride=1,
                    padding=1,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))

            self.reg_convs.append(
                ConvModule(c,
                           self.feat_channels,
                           3,
                           stride=1,
                           padding=1,
                           norm_cfg=self.norm_cfg,
                           bias=self.norm_cfg is None))

        self.fcos_cls = nn.Conv2d(self.feat_channels,
                                  self.num_classes,
                                  3,
                                  padding=1)
        self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3,
                                  padding=1)  # *l, *r, *t, *b
        self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3,
                                         padding=1)  # criterion
        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides
                                     ])  # [4, 8, 16, 32, 64]
Example #24
 def __init__(self,
              num_classes,
              in_channels,
              stacked_convs=4,
              conv_cfg=None,
              norm_cfg=None,
              anchor_generator=dict(type='AnchorGenerator',
                                    octave_base_scale=4,
                                    scales_per_octave=3,
                                    ratios=[0.5, 1.0, 2.0],
                                    strides=[8, 16, 32, 64, 128]),
              scalar_norm=False,
              **kwargs):
     self.stacked_convs = stacked_convs
     self.conv_cfg = conv_cfg
     self.norm_cfg = norm_cfg
     self.scalar_norm = scalar_norm
     super(ATSSRetinaHead, self).__init__(num_classes,
                                          in_channels,
                                          anchor_generator=anchor_generator,
                                          **kwargs)
     if self.scalar_norm:
         self.scales = nn.ModuleList(
             [Scale(1.0) for _ in self.anchor_generator.strides])
Example #25
 def _init_layers(self):
     """Initialize layers of the head."""
     super()._init_layers()
     self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
     self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
Example #26
 def __init__(self):
     super(CAM, self).__init__()
     self.gamma = Scale(0)
Example #27
    def __init__(self, in_channels, out_channels, conv_cfg, num_stack=1):
        """
        Args:
            num_channels:
            conv_channels:
            first_time: whether the input comes directly from the efficientnet,
                        if True, downchannel it first, and downsample P5 to generate P6 then P7
            epsilon: epsilon of fast weighted attention sum of BiFPN, not the BN's epsilon
            onnx_export: if True, use Swish instead of MemoryEfficientSwish
        """
        super(YeFPN, self).__init__()
        assert isinstance(conv_cfg, dict), "conv_cfg should be dict!"
        conv_type = conv_cfg["type"]
        conv_info = conv_cfg["info"]
        assert isinstance(conv_type, str), "conv_type should be string!"
        # assert isinstance(conv_info, dict), "conv_info should be dict!"

        assert "norm_cfg" in conv_info
        norm_cfg = conv_info["norm_cfg"]

        Conv = build_op(conv_type)

        # Conv layers
        self.lateral_P3 = ConvModule(in_channels[0],
                                     out_channels,
                                     kernel_size=1,
                                     norm_cfg=norm_cfg)
        self.lateral_P4 = ConvModule(in_channels[1],
                                     out_channels,
                                     kernel_size=1,
                                     norm_cfg=norm_cfg)
        self.lateral_P5 = ConvModule(in_channels[2],
                                     out_channels,
                                     kernel_size=1,
                                     norm_cfg=norm_cfg)
        self.lateral_P6 = ConvModule(in_channels[3],
                                     out_channels,
                                     kernel_size=1,
                                     norm_cfg=norm_cfg)
        self.lateral_P7 = ConvModule(in_channels[4],
                                     out_channels,
                                     kernel_size=1,
                                     norm_cfg=norm_cfg)

        # self.conv7_1 = Conv(out_channels, out_channels, kernel_size=3, **conv_info)
        # self.conv6_1 = Conv(out_channels, out_channels, kernel_size=3, **conv_info)
        # self.conv5_1 = Conv(out_channels, out_channels, kernel_size=3, **conv_info)
        # self.conv4_1 = Conv(out_channels, out_channels, kernel_size=3, **conv_info)
        # self.conv3_1 = Conv(out_channels, out_channels, kernel_size=3, **conv_info)

        self.conv7_2 = Conv(out_channels,
                            out_channels,
                            kernel_size=3,
                            **conv_info)
        self.conv6_2 = Conv(out_channels,
                            out_channels,
                            kernel_size=3,
                            **conv_info)
        self.conv5_2 = Conv(out_channels,
                            out_channels,
                            kernel_size=3,
                            **conv_info)
        self.conv4_2 = Conv(out_channels,
                            out_channels,
                            kernel_size=3,
                            **conv_info)
        self.conv3_2 = Conv(out_channels,
                            out_channels,
                            kernel_size=3,
                            **conv_info)

        self.dowm_con6_7 = ConvModule(out_channels,
                                      out_channels,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      norm_cfg=norm_cfg)
        self.dowm_con5_6 = ConvModule(out_channels,
                                      out_channels,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      norm_cfg=norm_cfg)
        self.dowm_con4_5 = ConvModule(out_channels,
                                      out_channels,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      norm_cfg=norm_cfg)
        self.dowm_con3_4 = ConvModule(out_channels,
                                      out_channels,
                                      kernel_size=3,
                                      stride=2,
                                      padding=1,
                                      norm_cfg=norm_cfg)

        self.upsample7_6 = F.interpolate
        self.upsample6_5 = F.interpolate
        self.upsample5_4 = F.interpolate
        self.upsample4_3 = F.interpolate

        self.shortcut6_7 = F.interpolate
        self.shortcut4_3 = F.interpolate

        self.scale7_6 = Scale()
        self.scale6_5 = Scale()
        self.scale5_4 = Scale()
        self.scale4_3 = Scale()

        self.scale6_7 = Scale()
        self.scale5_6 = Scale()
        self.scale4_5 = Scale()
        self.scale3_4 = Scale()
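The learnable `Scale` gates here weight the top-down (`scale7_6` … `scale4_3`) and bottom-up (`scale6_7` … `scale3_4`) contributions before fusion, playing a role similar to BiFPN's fusion weights. One top-down step might look roughly like the following illustrative sketch (the module's real forward is not shown):

import torch
import torch.nn.functional as F
from mmcv.cnn import Scale

out_channels = 64
p6_lat = torch.rand(1, out_channels, 20, 20)   # lateral_P6 output
p7 = torch.rand(1, out_channels, 10, 10)       # level above
scale7_6 = Scale()                              # defaults to 1.0, learned during training
p6 = p6_lat + scale7_6(F.interpolate(p7, scale_factor=2, mode='nearest'))
# the fused map would then go through conv6_2 in the real module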
Example #28
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.topk_conv = nn.ModuleList()
        self.mlvl_cls = nn.ModuleList()
        self.mlvl_reg = nn.ModuleList()
        self.mlvl_gfl_cls = nn.ModuleList()
        self.mlvl_gfl_reg = nn.ModuleList()
        self.mlvl_conf_vector = nn.ModuleList()
        for level in range(self.num_out):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                chn = self.in_channels if i == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(chn,
                               self.feat_channels,
                               3,
                               stride=1,
                               padding=1,
                               conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg))
                reg_convs.append(
                    ConvModule(chn,
                               self.feat_channels,
                               3,
                               stride=1,
                               padding=1,
                               conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg))

            gfl_cls = nn.Conv2d(self.feat_channels,
                                self.num_anchors * self.cls_out_channels,
                                1,
                                padding=0)
            gfl_reg = nn.Conv2d(self.feat_channels,
                                4 * (self.reg_max + 1),
                                1,
                                padding=0)

            topk = topk_conv(self.reg_max, self.total_dim)

            conf_vector = [nn.Conv2d(4 * self.total_dim, self.reg_channels, 1)]
            conf_vector += [self.relu]
            conf_vector += [nn.Conv2d(self.reg_channels, 1, 1), nn.Sigmoid()]

            self.topk_conv.append(topk)
            self.mlvl_cls.append(cls_convs)
            self.mlvl_reg.append(reg_convs)
            self.mlvl_gfl_cls.append(gfl_cls)
            self.mlvl_gfl_reg.append(gfl_reg)
            self.mlvl_conf_vector.append(nn.Sequential(*conf_vector))

        if self.scales:
            self.scales = nn.ModuleList([
                Scale(float(self.scales))
                for _ in self.anchor_generator.strides
            ])
        else:
            self.scales = nn.ModuleList(
                [nn.Identity() for _ in self.anchor_generator.strides])
Example #29
    def _init_layers(self):
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        self.mask_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if not self.use_dcn:
                self.cls_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        bias=self.norm_cfg is None))
                self.reg_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        bias=self.norm_cfg is None))
                self.mask_convs.append(
                    ConvModule(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg,
                        bias=self.norm_cfg is None))
            else:
                self.cls_convs.append(
                    ModulatedDeformConv2dPack(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        dilation=1,
                        deformable_groups=1,
                    ))
                if self.norm_cfg:
                    self.cls_convs.append(build_norm_layer(
                        self.norm_cfg, self.feat_channels)[1])
                self.cls_convs.append(nn.ReLU(inplace=True))

                self.reg_convs.append(
                    ModulatedDeformConv2dPack(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        dilation=1,
                        deformable_groups=1,
                    ))
                if self.norm_cfg:
                    self.reg_convs.append(build_norm_layer(
                        self.norm_cfg, self.feat_channels)[1])
                self.reg_convs.append(nn.ReLU(inplace=True))

                self.mask_convs.append(
                    ModulatedDeformConv2dPack(
                        chn,
                        self.feat_channels,
                        3,
                        stride=1,
                        padding=1,
                        dilation=1,
                        deformable_groups=1,
                    ))
                if self.norm_cfg:
                    self.mask_convs.append(build_norm_layer(
                        self.norm_cfg, self.feat_channels)[1])
                self.mask_convs.append(nn.ReLU(inplace=True))

        self.polar_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.polar_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
        self.polar_mask = nn.Conv2d(self.feat_channels, 36, 3, padding=1)
        self.polar_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)

        self.scales_bbox = nn.ModuleList([Scale(1.0) for _ in self.strides])
        self.scales_mask = nn.ModuleList([Scale(1.0) for _ in self.strides])
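PolarMask keeps two independent sets of per-level scales because it regresses two kinds of distances: 4 box offsets and 36 ray lengths, both pushed through `exp()`. A hedged sketch of how one level would typically apply them (stand-in layers, not the head's exact forward):

import torch
import torch.nn as nn
from mmcv.cnn import Scale

feat_channels = 256
polar_reg = nn.Conv2d(feat_channels, 4, 3, padding=1)
polar_mask = nn.Conv2d(feat_channels, 36, 3, padding=1)
scale_bbox, scale_mask = Scale(1.0), Scale(1.0)

reg_feat = torch.rand(1, feat_channels, 32, 32)    # from the reg_convs stack
mask_feat = torch.rand(1, feat_channels, 32, 32)   # from the mask_convs stack
bbox_pred = scale_bbox(polar_reg(reg_feat)).float().exp()    # 4 box distances
mask_pred = scale_mask(polar_mask(mask_feat)).float().exp()  # 36 ray lengths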
Example #30
    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.mlvl_cls_convs = nn.ModuleList()
        self.mlvl_reg_convs = nn.ModuleList()
        self.mlvl_vfl_cls_convs = nn.ModuleList()
        self.mlvl_vfl_reg_convs = nn.ModuleList()
        self.mlvl_vfl_reg = nn.ModuleList()
        self.mlvl_vfl_refine_convs = nn.ModuleList()
        self.mlvl_scale = nn.ModuleList()
        self.mlvl_refine_scale = nn.ModuleList()
        if self.reg_cls_branch:
            self.mlvl_vfl_reg_cls = nn.ModuleList()

        for level in range(self.num_out):
            cls_convs = nn.ModuleList()
            reg_convs = nn.ModuleList()
            for i in range(self.stacked_convs):
                chn = self.in_channels if i == 0 else self.feat_channels
                cls_convs.append(
                    ConvModule(chn,
                               self.feat_channels,
                               3,
                               stride=1,
                               padding=1,
                               conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg))
                reg_convs.append(
                    ConvModule(chn,
                               self.feat_channels,
                               3,
                               stride=1,
                               padding=1,
                               conv_cfg=self.conv_cfg,
                               norm_cfg=self.norm_cfg))

            vfl_cls_convs = nn.Conv2d(self.feat_channels,
                                      self.cls_out_channels,
                                      1,
                                      padding=0)
            vfl_reg_convs = ConvModule(self.feat_channels,
                                       self.feat_channels,
                                       3,
                                       stride=1,
                                       padding=1,
                                       conv_cfg=self.conv_cfg,
                                       norm_cfg=self.norm_cfg,
                                       bias=self.conv_bias)

            vfl_refine_convs = nn.Conv2d(self.feat_channels, 4, 1, padding=0)
            vfl_reg = nn.Conv2d(self.feat_channels, 4, 1, padding=0)
            vfl_reg_cls = nn.Conv2d(self.feat_channels,
                                    self.cls_out_channels,
                                    1,
                                    padding=0)

            scale = Scale(1.0)
            scale_refine = Scale(1.0)

            self.mlvl_cls_convs.append(cls_convs)
            self.mlvl_reg_convs.append(reg_convs)
            self.mlvl_vfl_cls_convs.append(vfl_cls_convs)
            self.mlvl_vfl_reg_convs.append(vfl_reg_convs)
            self.mlvl_vfl_refine_convs.append(vfl_refine_convs)
            self.mlvl_vfl_reg.append(vfl_reg)
            if self.reg_cls_branch:
                self.mlvl_vfl_reg_cls.append(vfl_reg_cls)

            self.mlvl_scale.append(scale)
            self.mlvl_refine_scale.append(scale_refine)