def _init_layers(self):
    self.cls_convs = nn.ModuleList()
    self.ins_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
        self.ins_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
    self.fcos_cls = nn.Conv2d(
        self.feat_channels, self.cls_out_channels, 3, padding=1)
    self.controller = nn.Conv2d(
        self.feat_channels, self.total_params, 3, padding=1)

def _init_layers(self):
    self.relu = nn.ReLU(inplace=True)
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
    self.retina_cls = nn.Conv2d(
        self.feat_channels,
        self.num_anchors * self.cls_out_channels,
        3,
        padding=1)
    self.retina_reg = nn.Conv2d(
        self.feat_channels, self.num_anchors * 4, 3, padding=1)

def __init__(self,
             in_channels,
             out_channels,
             conv_cfg=None,
             norm_cfg=dict(type='BN')):
    super(BasicResBlock, self).__init__()

    # main path
    self.conv1 = ConvModule(
        in_channels,
        in_channels,
        kernel_size=3,
        padding=1,
        bias=False,
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg)
    self.conv2 = ConvModule(
        in_channels,
        out_channels,
        kernel_size=1,
        bias=False,
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        act_cfg=None)

    # identity path
    self.conv_identity = ConvModule(
        in_channels,
        out_channels,
        kernel_size=1,
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        act_cfg=None)

    self.relu = nn.ReLU(inplace=True)

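# A minimal forward() sketch for the BasicResBlock above, assuming the usual
# residual wiring (main path conv1 -> conv2, 1x1-projected identity, ReLU
# after the sum); illustrative, not taken verbatim from the source snippet.
def forward(self, x):
    identity = self.conv_identity(x)  # project identity to out_channels
    out = self.conv2(self.conv1(x))   # main path; conv2 has no activation
    return self.relu(out + identity)  # fuse and activate
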
def _init_layers(self):
    self.relu = nn.ReLU(inplace=True)
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
    self.gfl_cls = nn.Conv2d(
        self.feat_channels, self.cls_out_channels, 3, padding=1)
    self.gfl_reg = nn.Conv2d(
        self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
    self.scales = nn.ModuleList([Scale(1.0) for _ in self.anchor_strides])

def _init_layers(self):
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
    self.fcos_cls = nn.Conv2d(
        self.feat_channels, self.cls_out_channels, 3, padding=1)
    self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
    self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
    self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])

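# A per-level forward sketch for the FCOS head above, assuming the standard
# FCOS wiring: shared conv stacks feed the three predictors, and a learnable
# Scale per FPN level rescales the regression output before exp(). `scale`
# is one entry of self.scales; this is an illustration, not the verbatim
# source forward().
def forward_single(self, x, scale):
    cls_feat, reg_feat = x, x
    for cls_conv in self.cls_convs:
        cls_feat = cls_conv(cls_feat)
    for reg_conv in self.reg_convs:
        reg_feat = reg_conv(reg_feat)
    cls_score = self.fcos_cls(cls_feat)
    centerness = self.fcos_centerness(cls_feat)
    # scale then exp keeps the predicted distances positive
    bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
    return cls_score, bbox_pred, centerness
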
class HTCMaskHead(FCNMaskHead):

    def __init__(self, with_conv_res=True, *args, **kwargs):
        super(HTCMaskHead, self).__init__(*args, **kwargs)
        self.with_conv_res = with_conv_res
        if self.with_conv_res:
            self.conv_res = ConvModule(
                self.conv_out_channels,
                self.conv_out_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg)

    def init_weights(self):
        super(HTCMaskHead, self).init_weights()
        if self.with_conv_res:
            self.conv_res.init_weights()

    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
        if res_feat is not None:
            assert self.with_conv_res
            res_feat = self.conv_res(res_feat)
            x = x + res_feat
        for conv in self.convs:
            x = conv(x)
        res_feat = x
        outs = []
        if return_logits:
            x = self.upsample(x)
            if self.upsample_method == 'deconv':
                x = self.relu(x)
            mask_pred = self.conv_logits(x)
            outs.append(mask_pred)
        if return_feat:
            outs.append(res_feat)
        return outs if len(outs) > 1 else outs[0]

def __init__(self,
             in_channels,
             out_channels,
             scale,
             block_type,
             norm_cfg=dict(type='BN'),
             alpha=1.0):
    super(Resample, self).__init__()
    self.scale = scale
    new_in_channels = int(in_channels * alpha)
    if block_type == Bottleneck:
        in_channels *= 4
    self.squeeze_conv = ConvModule(
        in_channels, new_in_channels, 1, norm_cfg=norm_cfg)
    if scale < 1:
        self.downsample_conv = ConvModule(
            new_in_channels,
            new_in_channels,
            3,
            padding=1,
            stride=2,
            norm_cfg=norm_cfg)
    self.expand_conv = ConvModule(
        new_in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)

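# A forward() sketch for the Resample block above, assuming the SpineNet-style
# resampling path: 1x1 squeeze, then a stride-2 conv when downsampling
# (scale < 1) or nearest-neighbor upsampling (scale > 1), then a 1x1 expand.
# The interpolation choice is an assumption for illustration.
def forward(self, x):
    x = self.squeeze_conv(x)
    if self.scale < 1:
        x = self.downsample_conv(x)  # stride-2 conv halves the resolution
    elif self.scale > 1:
        x = F.interpolate(x, scale_factor=self.scale, mode='nearest')
    return self.expand_conv(x)
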
def _init_layers(self):
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    # box branch
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
    self.fovea_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
    # cls branch
    if not self.with_deform:
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.norm_cfg is None))
        self.fovea_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
    else:
        self.cls_convs.append(
            ConvModule(
                self.feat_channels,
                self.feat_channels * 4,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
        self.cls_convs.append(
            ConvModule(
                self.feat_channels * 4,
                self.feat_channels * 4,
                1,
                stride=1,
                padding=0,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
        self.feature_adaption = FeatureAlign(
            self.feat_channels,
            self.feat_channels,
            kernel_size=3,
            deformable_groups=self.deformable_groups)
        self.fovea_cls = nn.Conv2d(
            self.feat_channels * 4, self.cls_out_channels, 3, padding=1)

def _init_layers(self):
    self.tl_fadp = DeformConv(self.in_channels, self.in_channels, 3, 1, 1)
    self.br_fadp = DeformConv(self.in_channels, self.in_channels, 3, 1, 1)
    self.mid_tl_fadp = DeformConv(self.in_channels, self.in_channels, 3, 1, 1)
    self.mid_br_fadp = DeformConv(self.in_channels, self.in_channels, 3, 1, 1)

    self.tl_offset = nn.Conv2d(2, 18, 1, bias=False)
    self.br_offset = nn.Conv2d(2, 18, 1, bias=False)
    self.mid_tl_offset = nn.Conv2d(2, 18, 1, bias=False)
    self.mid_br_offset = nn.Conv2d(2, 18, 1, bias=False)

    self.tl_pool = TopLeftPool(self.in_channels)
    self.br_pool = BottomRightPool(self.in_channels)
    self.mid_tl_pool = TopLeftPool(self.in_channels)
    self.mid_br_pool = BottomRightPool(self.in_channels)

    self.tl_heat = make_kp_layer(out_dim=self.num_classes)
    self.br_heat = make_kp_layer(out_dim=self.num_classes)
    self.tl_off_c = make_kp_layer(out_dim=2)
    self.br_off_c = make_kp_layer(out_dim=2)
    self.tl_off_c_2 = make_kp_layer(out_dim=2)
    self.br_off_c_2 = make_kp_layer(out_dim=2)
    self.tl_off = make_kp_layer(out_dim=2)
    self.br_off = make_kp_layer(out_dim=2)

    # middle supervision
    self.mid_tl_heat = make_kp_layer(out_dim=self.num_classes)
    self.mid_br_heat = make_kp_layer(out_dim=self.num_classes)
    self.mid_tl_off_c = make_kp_layer(out_dim=2)
    self.mid_br_off_c = make_kp_layer(out_dim=2)
    self.mid_tl_off_c_2 = make_kp_layer(out_dim=2)
    self.mid_br_off_c_2 = make_kp_layer(out_dim=2)
    self.mid_tl_off = make_kp_layer(out_dim=2)
    self.mid_br_off = make_kp_layer(out_dim=2)

    if self.with_mask:
        # mask branch (the ModuleLists must exist before the appends below)
        self.convs = nn.ModuleList()
        self.mid_convs = nn.ModuleList()
        for i in range(4):
            self.convs.append(
                ConvModule(self.in_channels, self.in_channels, 3, padding=1))
            self.mid_convs.append(
                ConvModule(self.in_channels, self.in_channels, 3, padding=1))
        # +1 for the background channel
        self.conv_logits = nn.Conv2d(
            self.in_channels, self.num_classes + 1, 1)
        self.mid_conv_logits = nn.Conv2d(
            self.in_channels, self.num_classes + 1, 1)

def _init_layers(self):
    # Build list of intermediate convolutions (shared across pyramid levels)
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=None,
                act_cfg=None))
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=None,
                act_cfg=None))
    # Build list of batchnorms: unlike the convs, one per pyramid level
    self.cls_bns = nn.ModuleList()
    self.reg_bns = nn.ModuleList()
    for depth_idx in range(self.stacked_convs):
        cls_bns = nn.ModuleList()
        reg_bns = nn.ModuleList()
        chn = self.in_channels if depth_idx == 0 else self.feat_channels
        for level_idx in range(self.num_levels):
            cls_bns.append(
                BnModule(chn, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
            reg_bns.append(
                BnModule(chn, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg))
        self.cls_bns.append(cls_bns)
        self.reg_bns.append(reg_bns)
    # Build final convolutions
    self.predict_cls = ConvModule(
        self.feat_channels,
        self.num_anchors * self.cls_out_channels,
        3,
        padding=1,
        conv_cfg=self.conv_cfg,
        norm_cfg=None,
        act_cfg=None)
    self.predict_reg = ConvModule(
        self.feat_channels,
        self.num_anchors * 4,
        3,
        padding=1,
        conv_cfg=self.conv_cfg,
        norm_cfg=None,
        act_cfg=None)

def __init__(self,
             num_ins,
             fusion_level,
             num_convs=4,
             in_channels=256,
             conv_out_channels=256,
             num_classes=183,
             ignore_label=255,
             loss_weight=0.2,
             conv_cfg=None,
             norm_cfg=None):
    super(FusedSemanticHead, self).__init__()
    self.num_ins = num_ins
    self.fusion_level = fusion_level
    self.num_convs = num_convs
    self.in_channels = in_channels
    self.conv_out_channels = conv_out_channels
    self.num_classes = num_classes
    self.ignore_label = ignore_label
    self.loss_weight = loss_weight
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.fp16_enabled = False

    self.lateral_convs = nn.ModuleList()
    for i in range(self.num_ins):
        self.lateral_convs.append(
            ConvModule(
                self.in_channels,
                self.in_channels,
                1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                inplace=False))

    self.convs = nn.ModuleList()
    for i in range(self.num_convs):
        in_channels = self.in_channels if i == 0 else conv_out_channels
        self.convs.append(
            ConvModule(
                in_channels,
                conv_out_channels,
                3,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))

    self.conv_embedding = ConvModule(
        conv_out_channels,
        conv_out_channels,
        1,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg)
    self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)

    self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)

def _init_layers(self): """Initialize each of the layers needed.""" self.cls_convs = nn.ModuleList() self.energy_convs = None if not self.split_convs else nn.ModuleList() self.reg_convs = nn.ModuleList() # Create the stacked convolutions for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels # Make the different convolution stacks self.cls_convs.append( ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) if self.split_convs: self.energy_convs.append( ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) self.reg_convs.append( ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, bias=self.norm_cfg is None)) # Classifier convolution self.wfcos_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 3, padding=1) # Bounding box regression convolution self.wfcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1) # Energy map convolution self.wfcos_energy = nn.Conv2d(self.feat_channels, self.max_energy, 1, padding=0) # Scaling factor for the different heads self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def __init__(self,
             in_channels,
             mid_channels,
             out_channels,
             num_layers,
             start_level,  # 0 means P3 of FPN
             end_level,
             num_classes,
             strides=[8, 16, 32, 64, 128],
             semantic_loss_on=False,
             conv_cfg=None,
             norm_cfg=None):
    super(MaskFeatHead, self).__init__()
    self.in_channels = in_channels
    self.mid_channels = mid_channels
    self.out_channels = out_channels
    self.num_layers = num_layers
    self.start_level = start_level
    self.end_level = end_level
    self.num_classes = num_classes
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.strides = strides
    self.semantic_loss_on = semantic_loss_on

    # parallel branch: one refine conv per pyramid level
    self.refine = nn.ModuleList()
    for _ in range(self.start_level, self.end_level + 1):
        self.refine.append(
            ConvModule(
                self.in_channels,   # e.g. 256
                self.mid_channels,  # e.g. 128
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))

    # shared tower applied after the levels are merged together
    self.tower = nn.ModuleList()
    for i in range(self.num_layers):
        self.tower.append(
            ConvModule(
                self.mid_channels,
                self.mid_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
    self.tower.append(nn.Conv2d(self.mid_channels, self.out_channels, 1))

def _init_layers(self):
    self.relu = nn.ReLU(inplace=True)
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
    self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
    self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2, 1)
    self.feature_adaption_cls = FeatureAdaption(
        self.feat_channels,
        self.feat_channels,
        kernel_size=3,
        deformable_groups=self.deformable_groups)
    self.feature_adaption_reg = FeatureAdaption(
        self.feat_channels,
        self.feat_channels,
        kernel_size=3,
        deformable_groups=self.deformable_groups)
    self.retina_cls = MaskedConv2d(
        self.feat_channels,
        self.num_anchors * self.cls_out_channels,
        3,
        padding=1)
    self.retina_reg = MaskedConv2d(
        self.feat_channels, self.num_anchors * 4, 3, padding=1)

def _init_layers(self):
    self.relu = nn.ReLU(inplace=True)
    self.cls_convs = nn.ModuleList()
    self.reg_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
        self.reg_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg))
    pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
    self.reppoints_cls_conv = DeformConv(self.feat_channels,
                                         self.point_feat_channels,
                                         self.dcn_kernel, 1, self.dcn_pad)
    self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
                                       self.cls_out_channels, 1, 1, 0)
    self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
                                             self.point_feat_channels, 3, 1,
                                             1)
    self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
                                            pts_out_dim, 1, 1, 0)
    self.reppoints_pts_refine_conv = DeformConv(self.feat_channels,
                                                self.point_feat_channels,
                                                self.dcn_kernel, 1,
                                                self.dcn_pad)
    self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
                                              pts_out_dim, 1, 1, 0)

def __init__(self,
             in_channels,
             out_channels,
             num_outs=5,
             pooling_type='AVG',
             conv_cfg=None,
             norm_cfg=None,
             with_cp=False,
             stride=1):
    super(HRFPN_upsamp, self).__init__()
    assert isinstance(in_channels, list)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.num_ins = len(in_channels)
    self.num_outs = num_outs
    self.with_cp = with_cp
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg

    self.reduction_conv = ConvModule(
        sum(in_channels),
        out_channels,
        kernel_size=1,
        conv_cfg=self.conv_cfg,
        activation=None)

    self.up_scale = nn.ModuleList()
    for i in range(self.num_ins):
        self.up_scale.append(
            nn.ConvTranspose2d(
                in_channels[i], in_channels[i], kernel_size=3, stride=2))

    self.fpn_convs = nn.ModuleList()
    for i in range(self.num_outs):
        self.fpn_convs.append(
            ConvModule(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                stride=stride,
                conv_cfg=self.conv_cfg,
                activation=None))

    if pooling_type == 'MAX':
        self.pooling = F.max_pool2d
    else:
        self.pooling = F.avg_pool2d

def _make_stem_layer(self, in_channels):
    """Build the stem network."""
    # Build the first conv and maxpooling layers.
    self.conv1 = ConvModule(
        in_channels,
        64,
        kernel_size=7,
        stride=2,
        padding=3,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

    # Build the initial level 2 blocks.
    self.init_block1 = make_res_layer(
        self._init_block_fn,
        64,
        int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
        self._block_repeats,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg)
    self.init_block2 = make_res_layer(
        self._init_block_fn,
        int(FILTER_SIZE_MAP[2] * self._filter_size_scale) * 4,
        int(FILTER_SIZE_MAP[2] * self._filter_size_scale),
        self._block_repeats,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg)

def __init__(self,
             channels,  # output channels
             levels,    # number of pyramid levels, e.g. 4
             init=0.5,
             conv_cfg=None,
             norm_cfg=None,
             activation=None,
             eps=0.0001):
    super(BiFPNModule, self).__init__()
    self.activation = activation
    self.eps = eps
    self.levels = levels
    self.bifpn_convs = nn.ModuleList()
    # learnable fusion weights: w1 of shape (2, levels) for two-input
    # nodes, w2 of shape (3, levels - 2) for three-input nodes
    self.w1 = nn.Parameter(torch.Tensor(2, levels).fill_(init))
    self.relu1 = nn.ReLU()
    self.w2 = nn.Parameter(torch.Tensor(3, levels - 2).fill_(init))
    self.relu2 = nn.ReLU()
    # one 3x3 conv per fused node: (levels - 1) for the top-down pass
    # plus (levels - 1) for the bottom-up pass
    for jj in range(2):
        for i in range(self.levels - 1):
            fpn_conv = nn.Sequential(
                ConvModule(
                    channels,
                    channels,
                    3,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                    act_cfg=self.activation,
                    inplace=False))
            self.bifpn_convs.append(fpn_conv)

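# A sketch of the fast normalized fusion the weights above support: each
# node's weights are ReLU-ed and normalized to sum to ~1 (eps avoids
# division by zero) before the weighted features are combined and convolved.
# The helper name and signature are assumptions for illustration, not the
# source forward(); three-input nodes use self.w2/self.relu2 analogously.
def _fuse_two(self, w, feat_a, feat_b, conv):
    w = self.relu1(w)              # keep fusion weights non-negative
    w = w / (w.sum() + self.eps)   # fast normalized fusion
    return conv(w[0] * feat_a + w[1] * feat_b)
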
def __init__(self,
             in_channels,
             out_channels,
             num_ins,
             conv_cfg=None,
             norm_cfg=None,
             separable_conv=True,
             act_cfg=None,
             eps=0.0001):
    super(WeightedInputConv_V2, self).__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.act_cfg = act_cfg
    self.num_ins = num_ins
    self.eps = eps
    if separable_conv:
        _, bn_layer = build_norm_layer(norm_cfg, out_channels)
        self.conv_op = nn.Sequential(
            SeparableConv(in_channels, out_channels, bias=True, relu=False),
            bn_layer)
    else:
        self.conv_op = ConvModule(
            in_channels,
            out_channels,
            3,
            padding=1,
            conv_cfg=None,
            norm_cfg=norm_cfg,
            act_cfg=None,
            inplace=False)
    # edge weights and swish activation for weighted feature fusion
    self.weight = nn.Parameter(torch.Tensor(self.num_ins).fill_(1.0))
    self._swish = Swish()

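# A forward() sketch for WeightedInputConv_V2, assuming the EfficientDet-style
# weighted feature fusion: non-negative edge weights, a normalized weighted
# sum of the inputs, swish, then the (separable) conv. The signature and the
# ReLU-on-weights step are assumptions for illustration.
def forward(self, inputs):
    assert len(inputs) == self.num_ins
    w = self.weight.relu()
    w = w / (w.sum() + self.eps)  # fast normalized fusion
    fused = sum(w[i] * inputs[i] for i in range(self.num_ins))
    return self.conv_op(self._swish(fused))
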
def __init__(self,
             in_channels,
             out_channels,
             kernel_size=1,
             stride=1,
             padding=0,
             dilation=1,
             norm_cfg=dict(
                 type='BN', momentum=0.003, eps=1e-4, requires_grad=True),
             activation=None,
             bias=False):
    super(SeparableConv2d, self).__init__()
    self.depthwise = nn.Conv2d(
        in_channels,
        in_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        groups=in_channels,
        bias=False)
    self.pointwise = ConvModule(
        in_channels,
        out_channels,
        1,
        norm_cfg=norm_cfg,
        act_cfg=None,
        bias=bias,
        inplace=False)
    if activation == "ReLU":
        self.act = nn.ReLU()
    elif activation == "Swish":
        self.act = MemoryEfficientSwish()
    else:
        self.act = None

def __init__(self,
             in_channels,
             num_levels,
             refine_level=2,
             refine_type=None,
             conv_cfg=None,
             norm_cfg=None):
    super(BFP, self).__init__()
    assert refine_type in [None, 'conv', 'non_local']

    self.in_channels = in_channels
    self.num_levels = num_levels
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg

    self.refine_level = refine_level
    self.refine_type = refine_type
    assert 0 <= self.refine_level < self.num_levels

    if self.refine_type == 'conv':
        self.refine = ConvModule(
            self.in_channels,
            self.in_channels,
            3,
            padding=1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)
    elif self.refine_type == 'non_local':
        self.refine = NonLocal2D(
            self.in_channels,
            reduction=1,
            use_scale=False,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg)

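# A forward() sketch for BFP, assuming the Libra R-CNN gather-refine-scatter
# scheme: resize every level to the refine_level's resolution, average,
# optionally refine, then scatter the result back to each level as a
# residual. Illustrative, not the verbatim source forward().
def forward(self, inputs):
    assert len(inputs) == self.num_levels
    gather_size = inputs[self.refine_level].size()[2:]
    feats = []
    for i in range(self.num_levels):
        if i < self.refine_level:
            feats.append(
                F.adaptive_max_pool2d(inputs[i], output_size=gather_size))
        else:
            feats.append(
                F.interpolate(inputs[i], size=gather_size, mode='nearest'))
    bsf = sum(feats) / len(feats)  # step 1: integrate
    if self.refine_type is not None:
        bsf = self.refine(bsf)     # step 2: refine
    outs = []
    for i in range(self.num_levels):
        out_size = inputs[i].size()[2:]
        if i < self.refine_level:
            res = F.interpolate(bsf, size=out_size, mode='nearest')
        else:
            res = F.adaptive_max_pool2d(bsf, output_size=out_size)
        outs.append(res + inputs[i])  # step 3: scatter as residual
    return tuple(outs)
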
def __init__(self,
             in_channels,
             out_channels,
             num_outs=5,
             pooling_type="AVG",
             conv_cfg=None,
             norm_cfg=None,
             with_cp=False,
             stride=1):
    super(HRFPN, self).__init__()
    assert isinstance(in_channels, list)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.num_ins = len(in_channels)
    self.num_outs = num_outs
    self.with_cp = with_cp
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg

    self.reduction_conv = ConvModule(
        sum(in_channels),
        out_channels,
        kernel_size=1,
        conv_cfg=self.conv_cfg,
        act_cfg=None)

    self.fpn_convs = nn.ModuleList()
    for i in range(self.num_outs):
        self.fpn_convs.append(
            ConvModule(
                out_channels,
                out_channels,
                kernel_size=3,
                padding=1,
                stride=stride,
                conv_cfg=self.conv_cfg,
                act_cfg=None))

    if pooling_type == "MAX":
        self.pooling = F.max_pool2d
    else:
        self.pooling = F.avg_pool2d

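# A forward() sketch for HRFPN, assuming the HRNet neck scheme: upsample
# every branch to the highest resolution, concatenate, reduce with the 1x1
# conv, then pool the reduced map into num_outs pyramid levels before the
# 3x3 output convs. Illustrative only.
def forward(self, inputs):
    assert len(inputs) == self.num_ins
    outs = [inputs[0]]
    for i in range(1, self.num_ins):
        outs.append(
            F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
    out = self.reduction_conv(torch.cat(outs, dim=1))
    outs = [out]
    for i in range(1, self.num_outs):
        outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
    return tuple(self.fpn_convs[i](outs[i]) for i in range(self.num_outs))
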
def may_apply_conv_1x1(in_channel,
                       out_channel,
                       conv_cfg=None,
                       norm_cfg=dict(type='BN'),
                       act_cfg=None):
    if in_channel != out_channel:
        return ConvModule(
            in_channel,
            out_channel,
            1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            bias=True,
            inplace=False)
    else:
        return nn.Identity()

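# Example use of may_apply_conv_1x1: adapt a feature only when the channel
# counts differ; nn.Identity() keeps the matching case a free no-op. The
# channel numbers below are arbitrary, for illustration.
adapt = may_apply_conv_1x1(512, 256)  # 1x1 ConvModule, 512 -> 256
noop = may_apply_conv_1x1(256, 256)   # nn.Identity()
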
def _init_layers(self):
    super(ATSSEffDetHead, self)._init_layers()
    self.predict_centerness = ConvModule(
        self.feat_channels,
        self.num_anchors,
        3,
        padding=1,
        conv_cfg=self.conv_cfg,
        norm_cfg=None,
        act_cfg=None)

def __init__(self,
             in_channels,
             out_channels,
             num_outs,
             start_level=0,
             end_level=-1,
             add_extra_convs=False,
             extra_convs_on_inputs=True,
             relu_before_extra_convs=False,
             no_norm_on_lateral=False,
             conv_cfg=None,
             norm_cfg=None,
             act_cfg=None):
    super(PAFPN, self).__init__(in_channels, out_channels, num_outs,
                                start_level, end_level, add_extra_convs,
                                extra_convs_on_inputs,
                                relu_before_extra_convs, no_norm_on_lateral,
                                conv_cfg, norm_cfg, act_cfg)
    self.fp16_enabled = False
    # add extra bottom-up pathway
    self.downsample_convs = nn.ModuleList()
    self.pafpn_convs = nn.ModuleList()
    for i in range(self.start_level + 1, self.backbone_end_level):
        d_conv = ConvModule(
            out_channels,
            out_channels,
            3,
            stride=2,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            inplace=False)
        pafpn_conv = ConvModule(
            out_channels,
            out_channels,
            3,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
            inplace=False)
        self.downsample_convs.append(d_conv)
        self.pafpn_convs.append(pafpn_conv)

def __init__(self, channels=256, with_conv=True, norm_cfg=None):
    super(MergingCell, self).__init__()
    self.with_conv = with_conv
    if self.with_conv:
        self.conv_out = ConvModule(
            channels,
            channels,
            3,
            padding=1,
            norm_cfg=norm_cfg,
            order=('act', 'conv', 'norm'))

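# A sketch of how a NAS-FPN-style MergingCell is typically applied: resize
# both inputs to the target size, combine them, then run conv_out. The
# _resize/_binary_op helper names are assumptions for illustration; in
# concrete subclasses _binary_op would be, e.g., a sum or a global-attention
# product.
def forward(self, x1, x2, out_size):
    x1 = self._resize(x1, out_size)
    x2 = self._resize(x2, out_size)
    x = self._binary_op(x1, x2)  # e.g. x1 + x2 for a sum cell
    if self.with_conv:
        x = self.conv_out(x)
    return x
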
def _make_endpoints(self):
    self.endpoint_convs = nn.ModuleDict()
    for block_spec in self._block_specs:
        if block_spec.is_output:
            in_channels = int(FILTER_SIZE_MAP[block_spec.level] *
                              self._filter_size_scale) * 4
            self.endpoint_convs[str(block_spec.level)] = ConvModule(
                in_channels,
                self._endpoints_num_filters,
                kernel_size=1,
                norm_cfg=self.norm_cfg,
                act_cfg=None)

def _init_layers(self):
    self.cls_convs = nn.ModuleList()
    self.wh_convs = nn.ModuleList()
    self.offset_convs = nn.ModuleList()
    for i in range(self.stacked_convs):
        chn = self.in_channels if i == 0 else self.feat_channels
        self.cls_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
        self.wh_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
        self.offset_convs.append(
            ConvModule(
                chn,
                self.feat_channels,
                3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                bias=self.norm_cfg is None))
    self.center_hm = nn.Conv2d(
        self.feat_channels, self.cls_out_channels, 3, padding=1, bias=True)
    self.center_wh = nn.Conv2d(
        self.feat_channels, 2, 3, padding=1, bias=True)
    self.center_offset = nn.Conv2d(
        self.feat_channels, 2, 3, padding=1, bias=True)

def __init__(self,
             in_channels,
             out_channels,
             num_ins,
             conv_cfg=None,
             norm_cfg=None,
             separable_conv=True,
             act_cfg=None,
             eps=0.0001):
    super(WeightedInputConv, self).__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.act_cfg = act_cfg
    self.num_ins = num_ins
    self.eps = eps
    self.separable_conv = separable_conv
    # depthwise 3x3 followed by pointwise 1x1 (separable convolution)
    self.sep_conv = ConvModule(
        in_channels,
        in_channels,
        3,
        padding=1,
        groups=in_channels,
        conv_cfg=None,
        norm_cfg=None,
        act_cfg=None,
        inplace=False)
    self.pw_conv = ConvModule(
        in_channels,
        out_channels,
        1,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='ReLU'),
        inplace=False)
    # edge weights for weighted feature fusion
    self.weight = nn.Parameter(torch.Tensor(self.num_ins).fill_(1.0))
    self.relu = nn.ReLU(inplace=False)

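# A forward() sketch for WeightedInputConv, mirroring the V2 variant above:
# ReLU-ed edge weights, a normalized weighted sum of the inputs, then the
# depthwise/pointwise conv pair. Signature and ordering are assumptions for
# illustration.
def forward(self, inputs):
    assert len(inputs) == self.num_ins
    w = self.relu(self.weight)
    w = w / (w.sum() + self.eps)  # fast normalized fusion
    fused = sum(w[i] * inputs[i] for i in range(self.num_ins))
    return self.pw_conv(self.sep_conv(fused))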