def __init__(self, dw_act_cfg=None, **kwargs):
    """Swap the FCN head's convs for depthwise-separable conv modules.

    Args:
        dw_act_cfg (dict | None): Activation config for the depthwise
            conv of every ``DepthwiseSeparableConvModule``.
        **kwargs: Forwarded to the parent FCN head constructor.
    """
    super(DepthwiseSeparableFCNHead, self).__init__(**kwargs)

    pad = self.kernel_size // 2

    def _sep_conv(src_channels):
        # Shared factory: one depthwise-separable conv with this head's
        # kernel size, norm config and the requested dw activation.
        return DepthwiseSeparableConvModule(
            src_channels,
            self.channels,
            kernel_size=self.kernel_size,
            padding=pad,
            norm_cfg=self.norm_cfg,
            dw_act_cfg=dw_act_cfg)

    # First conv maps in_channels -> channels; the rest keep channels.
    self.convs[0] = _sep_conv(self.in_channels)
    for idx in range(1, self.num_convs):
        self.convs[idx] = _sep_conv(self.channels)

    if self.concat_input:
        # Fuses the head input concatenated with the conv output.
        self.conv_cat = _sep_conv(self.in_channels + self.channels)
def __init__(self, in_channels, conv_cfg=None, norm_cfg=dict(type='BN')):
    """Build one depthwise-separable 3x3 projection per input branch.

    Branches are processed from the deepest (most channels) to the
    shallowest; every projection except the last maps a branch down to
    the next branch's channel count, the last keeps its channels.

    Args:
        in_channels (list[int] | tuple[int]): Channels per branch,
            shallow-to-deep; stored reversed in ``self.in_channels``.
        conv_cfg (dict | None): Unused here; kept for interface parity.
        norm_cfg (dict): Norm layer config for every projection.
    """
    super().__init__()
    # Reverse so index 0 is the deepest branch.
    self.in_channels = in_channels[::-1]
    last = len(in_channels) - 1
    layers = []
    for idx, src in enumerate(self.in_channels):
        # All but the last projection reduce to the next branch's width;
        # the last one is channel-preserving.
        dst = src if idx == last else self.in_channels[idx + 1]
        layers.append(
            DepthwiseSeparableConvModule(
                in_channels=src,
                out_channels=dst,
                kernel_size=3,
                stride=1,
                padding=1,
                norm_cfg=norm_cfg,
                act_cfg=dict(type='ReLU'),
                dw_act_cfg=None,
                pw_act_cfg=dict(type='ReLU')))
    self.projects = nn.ModuleList(layers)
def __init__(self, c1_in_channels, c1_channels, **kwargs):
    """DeepLabV3+-style head: separable ASPP plus low-level (c1) fusion.

    Args:
        c1_in_channels (int): Channels of the low-level feature;
            0 disables the c1 bottleneck entirely.
        c1_channels (int): Output channels of the c1 bottleneck.
        **kwargs: Forwarded to the parent head constructor.
    """
    super(DepthwiseSeparableASPPHead, self).__init__(**kwargs)
    assert c1_in_channels >= 0
    self.aspp_modules = DepthwiseSeparableASPPModule(
        dilations=self.dilations,
        in_channels=self.in_channels,
        channels=self.channels,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    # 1x1 projection of the low-level feature, or disabled when there
    # is no c1 input.
    self.c1_bottleneck = None
    if c1_in_channels > 0:
        self.c1_bottleneck = ConvModule(
            c1_in_channels,
            c1_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    # Two stacked 3x3 separable convs fuse ASPP output with c1.
    channel_pairs = ((self.channels + c1_channels, self.channels),
                     (self.channels, self.channels))
    self.sep_bottleneck = nn.Sequential(*[
        DepthwiseSeparableConvModule(
            src,
            dst,
            3,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg) for src, dst in channel_pairs
    ])
def build_project(self, in_channels, channels, num_convs, use_conv_module,
                  conv_cfg, norm_cfg, act_cfg):
    """Build projection layer for key/query/value/out.

    Returns a single module when one conv is requested, otherwise an
    ``nn.Sequential`` of ``num_convs`` convs (first maps in_channels ->
    channels, the rest keep channels).

    NOTE(review): the ``conv_cfg``/``norm_cfg``/``act_cfg`` arguments are
    not used here — the conv-module branch reads ``self.norm_cfg`` and
    ``self.act_cfg`` instead; confirm this override is intentional.
    """
    # (in, out) channel pair for each conv in the projection stack.
    io_pairs = [(in_channels, channels)] + [(channels, channels)] * (num_convs - 1)
    if use_conv_module:
        layers = [
            DepthwiseSeparableConvModule(
                src,
                dst,
                3,
                padding=1,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg) for src, dst in io_pairs
        ]
    else:
        # Plain 1x1 convs without norm/activation.
        layers = [nn.Conv2d(src, dst, 1) for src, dst in io_pairs]
    return nn.Sequential(*layers) if len(layers) > 1 else layers[0]
def __init__(self,
             in_channels,
             dw_channels,
             out_channels,
             conv_cfg=None,
             norm_cfg=dict(type='BN'),
             act_cfg=dict(type='ReLU')):
    """Learning-to-downsample: one strided conv then two strided
    depthwise-separable convs (overall 8x downsampling, stride 2 each).

    Args:
        in_channels (int): Input image/feature channels.
        dw_channels (tuple[int]): Two intermediate channel counts, one
            per depthwise-separable stage.
        out_channels (int): Output channels of the final stage.
        conv_cfg (dict | None): Conv layer config for the first conv.
        norm_cfg (dict): Norm layer config for all stages.
        act_cfg (dict): Activation config for the first conv.
    """
    super(LearningToDownsample, self).__init__()
    self.conv_cfg = conv_cfg
    self.norm_cfg = norm_cfg
    self.act_cfg = act_cfg
    dw_channels1, dw_channels2 = dw_channels[0], dw_channels[1]
    self.conv = ConvModule(
        in_channels,
        dw_channels1,
        3,
        stride=2,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    self.dsconv1 = DepthwiseSeparableConvModule(
        dw_channels1,
        dw_channels2,
        kernel_size=3,
        stride=2,
        padding=1,
        norm_cfg=self.norm_cfg)
    self.dsconv2 = DepthwiseSeparableConvModule(
        dw_channels2,
        out_channels,
        kernel_size=3,
        stride=2,
        padding=1,
        norm_cfg=self.norm_cfg)
def __init__(self, c1_in_channels, c1_channels, **kwargs):
    """Separable-ASPP head with extra per-scale segmentation classifiers.

    Args:
        c1_in_channels (int): Channels of the low-level (c1) feature;
            0 disables the c1 bottleneck.
        c1_channels (int): Output channels of the c1 bottleneck.
        **kwargs: Forwarded to the parent head constructor (must provide
            ``num_classes`` via the base class).
    """
    super(ConsistDepthwiseSeparableASPPHead, self).__init__(**kwargs)
    assert c1_in_channels >= 0
    self.aspp_modules = DepthwiseSeparableASPPModule(
        dilations=self.dilations,
        in_channels=self.in_channels,
        channels=self.channels,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    if c1_in_channels > 0:
        self.c1_bottleneck = ConvModule(
            c1_in_channels,
            c1_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    else:
        self.c1_bottleneck = None
    self.sep_bottleneck = nn.Sequential(
        DepthwiseSeparableConvModule(
            self.channels + c1_channels,
            self.channels,
            3,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg),
        DepthwiseSeparableConvModule(
            self.channels,
            self.channels,
            3,
            padding=1,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg))
    # Constrain the predictions over different scales to be consistent.
    # BUGFIX: `num_classes` was referenced as a bare (undefined) name,
    # raising NameError at construction; use the inherited
    # `self.num_classes`, consistent with the other heads in this file.
    self.conv_seg_scale1 = nn.Conv2d(
        self.channels, self.num_classes, kernel_size=1)
    self.conv_seg_scale2 = nn.Conv2d(
        self.channels, self.num_classes, kernel_size=1)
    self.conv_seg_scale3 = nn.Conv2d(
        self.channels, self.num_classes, kernel_size=1)
    self.conv_seg_scale4 = nn.Conv2d(
        self.channels, self.num_classes, kernel_size=1)
def __init__(self, *args, **kwargs):
    """Object-attention block whose output bottleneck is a 3x3
    depthwise-separable conv (replaces the parent's bottleneck).

    Args:
        *args: Forwarded to the parent ObjectAttentionBlock.
        **kwargs: Forwarded to the parent ObjectAttentionBlock.
    """
    super(DepthwiseSeparableObjectAttentionBlock,
          self).__init__(*args, **kwargs)
    # Fuses the concatenated [context, query] features back down to
    # in_channels.
    self.bottleneck = DepthwiseSeparableConvModule(
        self.in_channels * 2,
        self.in_channels,
        kernel_size=3,
        padding=1,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
def __init__(self, **kwargs):
    """ASPP module whose dilated (dilation > 1) branches use
    depthwise-separable convs; the dilation-1 branch is left untouched.

    Args:
        **kwargs: Forwarded to the parent ASPP module constructor.
    """
    super(DepthwiseSeparableASPPModule, self).__init__(**kwargs)
    for idx, rate in enumerate(self.dilations):
        if rate <= 1:
            # Keep the parent's plain conv for the non-dilated branch.
            continue
        self[idx] = DepthwiseSeparableConvModule(
            self.in_channels,
            self.channels,
            3,
            dilation=rate,
            padding=rate,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
def __init__(self,
             in_channels=(512, 1024, 2048),
             mid_channels=512,
             start_level=0,
             end_level=-1,
             dilations=(1, 2, 4, 8),
             align_corners=False,
             conv_cfg=None,
             norm_cfg=dict(type='BN'),
             act_cfg=dict(type='ReLU'),
             init_cfg=None):
    """Joint Pyramid Upsampling (JPU) neck.

    Projects each selected backbone level to ``mid_channels`` with a 3x3
    conv, then applies one depthwise-separable dilated conv per entry in
    ``dilations`` on the concatenation of all projected levels.

    Args:
        in_channels (tuple[int]): Channels per backbone level.
        mid_channels (int): Width of every projection/dilation branch.
        start_level (int): First backbone level to use.
        end_level (int): Last level (exclusive); -1 means all levels.
        dilations (tuple[int]): Dilation rate per fusion branch.
        align_corners (bool): Stored for upsampling elsewhere.
        conv_cfg, norm_cfg, act_cfg (dict | None): Layer configs.
        init_cfg (dict | None): Forwarded to the base module.
    """
    super(JPU, self).__init__(init_cfg=init_cfg)
    assert isinstance(in_channels, tuple)
    assert isinstance(dilations, tuple)
    self.in_channels = in_channels
    self.mid_channels = mid_channels
    self.start_level = start_level
    self.num_ins = len(in_channels)
    if end_level == -1:
        self.backbone_end_level = self.num_ins
    else:
        assert end_level <= len(in_channels)
        self.backbone_end_level = end_level
    self.dilations = dilations
    self.align_corners = align_corners

    # One 3x3 projection per selected backbone level.
    self.conv_layers = nn.ModuleList(
        nn.Sequential(
            ConvModule(
                self.in_channels[level],
                self.mid_channels,
                kernel_size=3,
                padding=1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        for level in range(self.start_level, self.backbone_end_level))

    # Each dilation branch consumes the concatenation of all projected
    # levels, hence the multiplied input width.
    fused_channels = (self.backbone_end_level -
                      self.start_level) * self.mid_channels
    self.dilation_layers = nn.ModuleList(
        nn.Sequential(
            DepthwiseSeparableConvModule(
                in_channels=fused_channels,
                out_channels=self.mid_channels,
                kernel_size=3,
                stride=1,
                padding=rate,
                dilation=rate,
                dw_norm_cfg=norm_cfg,
                dw_act_cfg=None,
                pw_norm_cfg=norm_cfg,
                pw_act_cfg=act_cfg)) for rate in dilations)
def __init__(self, feature_key, low_level_channels, low_level_key,
             low_level_channels_project, decoder_channels, **kwargs):
    """FPN-style decode head: bottleneck the deepest feature, then fuse
    progressively shallower low-level features top-down.

    Args:
        feature_key: Key selecting the deepest input feature.
        low_level_channels (list[int]): Channels of each low-level
            feature, ordered largest stride first.
        low_level_key (list): One lookup key per low-level feature.
        low_level_channels_project (list[int]): Projection width per
            low-level feature.
        decoder_channels (int): Width of every fusion stage output.
        **kwargs: Forwarded to the parent decode head constructor.
    """
    super(FpnDecodeHead, self).__init__(**kwargs)
    self.feature_key = feature_key
    self.decoder_stage = len(low_level_channels)
    assert self.decoder_stage == len(low_level_key)
    assert self.decoder_stage == len(low_level_channels_project)
    self.low_level_key = low_level_key

    # 3x3 separable bottleneck on the deepest feature.
    self.bottleneck = DepthwiseSeparableConvModule(
        self.in_channels[feature_key],
        self.channels,
        3,
        padding=1,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    # Every fusion stage uses the same 5x5 separable conv recipe.
    fuse_bottleneck = partial(
        DepthwiseSeparableConvModule,
        kernel_size=5,
        padding=2,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)

    project, fuse = [], []
    # Top-down direction, i.e. starting from the largest stride.
    for stage in range(self.decoder_stage):
        # 1x1 projection (conv-bn-relu) of the low-level feature.
        project.append(
            nn.Sequential(
                nn.Conv2d(
                    low_level_channels[stage],
                    low_level_channels_project[stage],
                    1,
                    bias=False),
                nn.BatchNorm2d(low_level_channels_project[stage]),
                nn.ReLU(inplace=True)))
        # Stage 0 fuses with the bottleneck output; later stages fuse
        # with the previous decoder stage.
        prev_channels = self.channels if stage == 0 else decoder_channels
        fuse.append(
            fuse_bottleneck(
                prev_channels + low_level_channels_project[stage],
                decoder_channels,
            ))
    self.project = nn.ModuleList(project)
    self.fuse = nn.ModuleList(fuse)
    self.conv_seg = nn.Conv2d(
        decoder_channels, self.num_classes, kernel_size=1)
def _get_conv_module(self, in_channel, out_channel):
    """Return a 3x3 stride-1 conv block.

    Depthwise-separable when ``self.dw_conv`` is set, a plain
    ``ConvModule`` otherwise; both use this module's norm config.
    """
    if self.dw_conv:
        return DepthwiseSeparableConvModule(
            in_channel,
            out_channel,
            3,
            stride=1,
            padding=1,
            dw_norm_cfg=self.norm_cfg,
            pw_norm_cfg=self.norm_cfg)
    return ConvModule(
        in_channel,
        out_channel,
        3,
        stride=1,
        padding=1,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg)
def __init__(self, out_channels, norm_cfg=dict(type='BN')):
    """Build the global (middle), spatial (bottom) and local attention
    branches operating on ``out_channels``-wide features.
    """
    # Protect mutable default arguments.
    norm_cfg = cp.deepcopy(norm_cfg)
    super().__init__()
    self.out_channels = out_channels
    self.global_pooling = nn.AdaptiveAvgPool2d((1, 1))

    # Channel attention: two Linear+BN1d+ReLU stages then a Sigmoid gate.
    self.middle_path = nn.Sequential(
        Linear(self.out_channels, self.out_channels),
        build_norm_layer(dict(type='BN1d'), out_channels)[1],
        build_activation_layer(dict(type='ReLU')),
        Linear(self.out_channels, self.out_channels),
        build_norm_layer(dict(type='BN1d'), out_channels)[1],
        build_activation_layer(dict(type='ReLU')),
        build_activation_layer(dict(type='Sigmoid')))

    # Spatial attention: 1x1 conv, then a large-kernel (9x9) separable
    # conv that collapses to a single-channel sigmoid map.
    pointwise = ConvModule(
        self.out_channels,
        self.out_channels,
        kernel_size=1,
        stride=1,
        padding=0,
        norm_cfg=norm_cfg,
        inplace=False)
    spatial_map = DepthwiseSeparableConvModule(
        self.out_channels,
        1,
        kernel_size=9,
        stride=1,
        padding=4,
        norm_cfg=norm_cfg,
        inplace=False)
    self.bottom_path = nn.Sequential(
        pointwise, spatial_map,
        build_activation_layer(dict(type='Sigmoid')))

    # Local 3x3 conv applied before the attention re-weighting.
    self.conv_bn_relu_prm_1 = ConvModule(
        self.out_channels,
        self.out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        norm_cfg=norm_cfg,
        inplace=False)
def __init__(self, ocr_channels, c1_in_channels, c1_channels, scale=1,
             **kwargs):
    """OCR head variant using depthwise-separable convs, with optional
    low-level (c1) feature fusion.

    Args:
        ocr_channels (int): Channels of the object-context block.
        c1_in_channels (int): Channels of the low-level feature;
            0 disables the c1 bottleneck.
        c1_channels (int): Output channels of the c1 bottleneck.
        scale (int): Scale passed to the attention/gather modules.
        **kwargs: Forwarded to the parent head constructor.
    """
    super(OCRPlusHead, self).__init__(**kwargs)
    assert c1_in_channels >= 0
    self.ocr_channels = ocr_channels
    self.scale = scale
    self.object_context_block = DepthwiseSeparableObjectAttentionBlock(
        self.channels,
        self.ocr_channels,
        self.scale,
        conv_cfg=self.conv_cfg,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    self.spatial_gather_module = SpatialGatherModule(self.scale)
    # 5x5 separable conv projecting the backbone feature to `channels`.
    self.bottleneck = DepthwiseSeparableConvModule(
        self.in_channels,
        self.channels,
        5,
        padding=2,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    # Removed a large commented-out two-conv fuse_bottleneck variant
    # that was superseded by this single 5x5 separable conv.
    self.fuse_bottleneck = DepthwiseSeparableConvModule(
        self.channels + c1_channels,
        self.channels,
        5,
        padding=2,
        norm_cfg=self.norm_cfg,
        act_cfg=self.act_cfg)
    if c1_in_channels > 0:
        self.c1_bottleneck = ConvModule(
            c1_in_channels,
            c1_channels,
            1,
            conv_cfg=self.conv_cfg,
            norm_cfg=self.norm_cfg,
            act_cfg=self.act_cfg)
    else:
        self.c1_bottleneck = None
def __init__(self,
             out_channels=128,
             align_corners=False,
             conv_cfg=None,
             norm_cfg=dict(type='BN'),
             act_cfg=dict(type='ReLU'),
             init_cfg=None):
    """Bilateral Guided Aggregation layer: builds the detail/semantic
    branch transforms and the final fusion conv.

    Args:
        out_channels (int): Channel count shared by every sub-module.
        align_corners (bool): Stored for the resize done elsewhere.
        conv_cfg, norm_cfg, act_cfg (dict | None): Layer configs.
        init_cfg (dict | None): Forwarded to the base module.
    """
    super(BGALayer, self).__init__(init_cfg=init_cfg)
    self.out_channels = out_channels
    self.align_corners = align_corners

    # Both dwconv paths share the exact same separable-conv recipe:
    # norm after the depthwise conv, bare pointwise conv, no activation.
    dw_kwargs = dict(
        in_channels=self.out_channels,
        out_channels=self.out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        dw_norm_cfg=norm_cfg,
        dw_act_cfg=None,
        pw_norm_cfg=None,
        pw_act_cfg=None,
    )
    self.detail_dwconv = nn.Sequential(
        DepthwiseSeparableConvModule(**dw_kwargs))
    # Detail branch: strided conv followed by average pooling (4x down).
    self.detail_down = nn.Sequential(
        ConvModule(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=3,
            stride=2,
            padding=1,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None),
        nn.AvgPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=False))
    self.semantic_conv = nn.Sequential(
        ConvModule(
            in_channels=self.out_channels,
            out_channels=self.out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None))
    self.semantic_dwconv = nn.Sequential(
        DepthwiseSeparableConvModule(**dw_kwargs))
    # Final fusion conv after the two branches are combined.
    self.conv = ConvModule(
        in_channels=self.out_channels,
        out_channels=self.out_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        inplace=True,
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        act_cfg=act_cfg,
    )
def __init__(self,
             in_channels,
             out_channels,
             exp_ratio=6,
             stride=1,
             conv_cfg=None,
             norm_cfg=dict(type='BN'),
             act_cfg=dict(type='ReLU'),
             init_cfg=None):
    """Gather-and-Expansion layer: 3x3 conv, channel-expanding depthwise
    conv(s), 1x1 projection, with a separable-conv shortcut when
    ``stride != 1``.

    Args:
        in_channels (int): Input channels.
        out_channels (int): Output channels.
        exp_ratio (int): Channel expansion factor for the depthwise part.
        stride (int): Stride of the depthwise conv; stride 1 uses an
            identity shortcut (``self.shortcut`` is ``None``).
        conv_cfg, norm_cfg, act_cfg (dict | None): Layer configs.
        init_cfg (dict | None): Forwarded to the base module.
    """
    super(GELayer, self).__init__(init_cfg=init_cfg)
    expanded = in_channels * exp_ratio
    self.conv1 = ConvModule(
        in_channels=in_channels,
        out_channels=in_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        conv_cfg=conv_cfg,
        norm_cfg=norm_cfg,
        act_cfg=act_cfg)
    if stride == 1:
        # Single expanding depthwise conv.
        # (ReLU in ConvModule not shown in paper.)
        self.dwconv = nn.Sequential(
            ConvModule(
                in_channels=in_channels,
                out_channels=expanded,
                kernel_size=3,
                stride=stride,
                padding=1,
                groups=in_channels,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        self.shortcut = None
    else:
        # Strided expanding depthwise conv (no activation) followed by a
        # second depthwise conv; the shortcut downsamples to match.
        self.dwconv = nn.Sequential(
            ConvModule(
                in_channels=in_channels,
                out_channels=expanded,
                kernel_size=3,
                stride=stride,
                padding=1,
                groups=in_channels,
                bias=False,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=None),
            # (ReLU in ConvModule not shown in paper.)
            ConvModule(
                in_channels=expanded,
                out_channels=expanded,
                kernel_size=3,
                stride=1,
                padding=1,
                groups=expanded,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg),
        )
        self.shortcut = nn.Sequential(
            DepthwiseSeparableConvModule(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=3,
                stride=stride,
                padding=1,
                dw_norm_cfg=norm_cfg,
                dw_act_cfg=None,
                pw_norm_cfg=norm_cfg,
                pw_act_cfg=None,
            ))
    # 1x1 projection back to out_channels; activation is applied after
    # the residual addition via self.act.
    self.conv2 = nn.Sequential(
        ConvModule(
            in_channels=expanded,
            out_channels=out_channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=False,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=None,
        ))
    self.act = build_activation_layer(act_cfg)