def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False,
             dilated=True, deep_stem=False, **kwargs):
    super(ResUnet, self).__init__(backbone, pretrained_base=pretrained_base,
                                  dilated=dilated, deep_stem=deep_stem, **kwargs)
    self.aux = aux
    self.dilated = dilated
    channels = self.base_channel
    # Deep-stem backbones (and resnest101) use a 128-channel stem; plain ResNets use 64.
    if deep_stem or backbone == 'resnest101':
        conv1_channel = 128
    else:
        conv1_channel = 64
    if dilated:
        # With a dilated backbone the deeper stages keep their spatial size,
        # so only two decoder stages are needed.
        self.donv_up3 = decoder_block(channels[0] + channels[3], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
    else:
        self.donv_up1 = decoder_block(channels[2] + channels[3], channels[2])
        self.donv_up2 = decoder_block(channels[1] + channels[2], channels[1])
        self.donv_up3 = decoder_block(channels[0] + channels[1], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
    if self.aux:
        self.aux_layer = _FCNHead(256, n_class)
    self.out_conv = nn.Sequential(
        nn.Conv2d(channels[0], channels[0], kernel_size=3, padding=1, bias=False),
        nn.BatchNorm2d(channels[0]),
        nn.ReLU(),
        nn.Conv2d(channels[0], n_class, kernel_size=1, bias=False),
    )
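
# --- Sketch: decoder_block (assumed) -----------------------------------------
# decoder_block is defined elsewhere in this repo. For reference, here is a
# minimal sketch of a typical U-Net-style decoder stage consistent with the
# channel math above (skip channels + deep channels in, fused channels out).
# The name DecoderBlockSketch and its exact layers are assumptions, not this
# repo's implementation. torch / nn / F may already be imported above.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DecoderBlockSketch(nn.Module):
    """Hypothetical decoder stage: upsample deep features, concat skip, fuse."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.fuse = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, skip, deep):
        # Upsample the deeper feature map to the skip connection's resolution.
        deep = F.interpolate(deep, size=skip.shape[2:], mode='bilinear',
                             align_corners=False)
        return self.fuse(torch.cat([skip, deep], dim=1))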
def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False,
             dilated=False, **kwargs):
    super(PSPNet, self).__init__(backbone, pretrained_base=pretrained_base,
                                 dilated=dilated, **kwargs)
    self.head = _PSPHead(self.base_channel[-1], n_class, **kwargs)
    self.aux = aux
    if self.aux:
        self.auxlayer = _FCNHead(256, n_class, **kwargs)
    self.exclusive = ['head', 'auxlayer'] if aux else ['head']
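
# --- Sketch: pyramid pooling head (assumed) ----------------------------------
# _PSPHead is imported from elsewhere. Below is a minimal sketch of the
# standard PSPNet pyramid pooling head (Zhao et al., CVPR 2017) to show what
# it computes; the name, bin sizes, and channel math are assumptions, not this
# repo's code.
class PSPHeadSketch(nn.Module):
    def __init__(self, in_channels, n_class, bins=(1, 2, 3, 6)):
        super().__init__()
        branch_channels = in_channels // len(bins)
        self.branches = nn.ModuleList([
            nn.Sequential(
                nn.AdaptiveAvgPool2d(bin_size),
                nn.Conv2d(in_channels, branch_channels, kernel_size=1, bias=False),
                nn.BatchNorm2d(branch_channels),
                nn.ReLU(inplace=True),
            ) for bin_size in bins
        ])
        self.project = nn.Sequential(
            nn.Conv2d(in_channels * 2, in_channels // 4, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(in_channels // 4),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1),
            nn.Conv2d(in_channels // 4, n_class, kernel_size=1),
        )

    def forward(self, x):
        size = x.shape[2:]
        # Pool at several grid sizes, then upsample each branch back to x's size.
        pyramid = [F.interpolate(branch(x), size=size, mode='bilinear',
                                 align_corners=False) for branch in self.branches]
        return self.project(torch.cat([x] + pyramid, dim=1))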
def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False,
             dilated=True, deep_stem=False, crop_size=224, **kwargs):
    super(EMANet, self).__init__(backbone, pretrained_base=pretrained_base,
                                 dilated=dilated, deep_stem=deep_stem, **kwargs)
    self.aux = aux
    self.dilated = dilated
    channels = self.base_channel  # e.g. [256, 512, 1024, 2048] for resnet50/101
    if deep_stem or backbone == 'resnest101':
        conv1_channel = 128
    else:
        conv1_channel = 64
    # self.spsp = SPSP(channels[3], scales=[6, 3, 2, 1])
    self.emau = EMAU(channels[0], k=32)
    if dilated:
        self.SF1 = AlignModule(channels[3], channels[0])
        self.donv_up3 = decoder_block(channels[0] + channels[3], channels[0])
        self.SF2 = AlignModule(channels[0], conv1_channel)
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
        self.out_conv = out_conv(channels[0], n_class)
    else:
        # self.SF1 = AlignModule(channels[3], channels[2])
        self.donv_up1 = decoder_block(channels[2] + channels[3], channels[2])
        # self.SF2 = AlignModule(channels[2], channels[1])
        self.donv_up2 = decoder_block(channels[1] + channels[2], channels[1])
        # self.SF3 = AlignModule(channels[1], channels[0])
        self.donv_up3 = decoder_block(channels[0] + channels[1], channels[0])
        # self.SF4 = AlignModule(channels[0], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
        self.out_conv = out_conv(channels[0], n_class)
    if self.aux:
        self.aux_layer = _FCNHead(channels[3], n_class)
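
# --- Sketch: EMAU (assumed, simplified) --------------------------------------
# EMAU is the Expectation-Maximization Attention unit of EMANet (Li et al.,
# ICCV 2019). The simplified sketch below shows the core EM iteration over k
# bases; the published unit additionally wraps this in 1x1 convs and updates
# the bases with a moving average during training. This is an illustration,
# not this repo's implementation.
class EMAUSketch(nn.Module):
    def __init__(self, c, k, stage_num=3):
        super().__init__()
        self.stage_num = stage_num
        mu = torch.randn(1, c, k)                    # k bases, each c-dimensional
        mu = mu / (mu.norm(dim=1, keepdim=True) + 1e-6)
        self.register_buffer('mu', mu)

    def forward(self, x):
        b, c, h, w = x.shape
        feat = x.view(b, c, h * w)                   # (b, c, n)
        mu = self.mu.expand(b, -1, -1)               # (b, c, k)
        with torch.no_grad():
            for _ in range(self.stage_num):
                z = F.softmax(torch.bmm(feat.transpose(1, 2), mu), dim=2)  # E-step
                z = z / (z.sum(dim=1, keepdim=True) + 1e-6)
                mu = torch.bmm(feat, z)                                    # M-step
                mu = mu / (mu.norm(dim=1, keepdim=True) + 1e-6)
        # Reconstruct a low-rank version of the feature map from the bases.
        z = F.softmax(torch.bmm(feat.transpose(1, 2), mu), dim=2)
        return torch.bmm(mu, z.transpose(1, 2)).view(b, c, h, w)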
def __init__(self, in_channel, n_class):
    super().__init__()
    self.n_class = n_class
    self.conv = _FCNHead(in_channel, n_class)
    inter_channel = in_channel // 2
    self.node = 64
    self.st1 = squeeze_and_expand(in_channel, self.node, inter_channel)
    self.st2 = squeeze_and_expand(in_channel, self.node, inter_channel)
    self.gcn = GCN(inter_channel, int(self.node * 2))
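
# --- Sketch: GCN over node features (assumed) --------------------------------
# GCN here presumably operates on node features produced by squeeze_and_expand,
# shaped (batch, channels, nodes). A minimal sketch in the spirit of the GloRe
# graph convolution (mix across nodes with a residual, then transform channels)
# is shown below; the real layer may differ.
class GCNSketch(nn.Module):
    def __init__(self, channels, nodes):
        super().__init__()
        self.node_mix = nn.Conv1d(nodes, nodes, kernel_size=1)           # node axis
        self.channel_mix = nn.Conv1d(channels, channels, kernel_size=1)  # channel axis
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):                     # x: (b, channels, nodes)
        h = self.node_mix(x.transpose(1, 2)).transpose(1, 2)  # exchange across nodes
        return self.relu(self.channel_mix(x + h))             # residual + transform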
def __init__(self, in_channel, n_class):
    super().__init__()
    self.n_class = n_class
    self.conv = _FCNHead(in_channel, n_class)
    # One GloRe unit per class.
    self.GloRe = nn.ModuleList()
    for _ in range(n_class):
        self.GloRe.append(
            GloRe_Unit(
                in_channel,
                in_channel // 4,
                ConvNd=nn.Conv2d,
                BatchNormNd=nn.BatchNorm2d,
            ))
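
# --- Sketch: GloRe_Unit (assumed, simplified) --------------------------------
# GloRe_Unit is the Global Reasoning unit (Chen et al., CVPR 2019): project
# pixels into a small interaction space, reason with a graph convolution, and
# project back with a residual. The sketch below is a simplified illustration
# under that assumption (reusing GCNSketch from above), not this repo's code.
class GloReUnitSketch(nn.Module):
    def __init__(self, in_channels, mid_channels, nodes=64):
        super().__init__()
        self.reduce = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
        self.assign = nn.Conv2d(in_channels, nodes, kernel_size=1)
        self.gcn = GCNSketch(mid_channels, nodes)   # graph reasoning over nodes
        self.expand = nn.Conv2d(mid_channels, in_channels, kernel_size=1)
        self.bn = nn.BatchNorm2d(in_channels)

    def forward(self, x):
        b, _, h, w = x.shape
        feat = self.reduce(x).view(b, -1, h * w)          # (b, c', n) pixel features
        assign = self.assign(x).view(b, -1, h * w)        # (b, N, n) projection map
        nodes = torch.bmm(feat, assign.transpose(1, 2))   # (b, c', N) graph nodes
        nodes = self.gcn(nodes)                           # reason in interaction space
        out = torch.bmm(nodes, assign)                    # (b, c', n) reverse projection
        out = self.expand(out.view(b, -1, h, w))
        return x + self.bn(out)                           # residual fusion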
def __init__(self, in_channel, n_class):
    super().__init__()
    self.n_class = n_class
    self.conv = _FCNHead(in_channel, n_class)
    inter_channel = in_channel // 2
    self.node = 64
    # One squeeze_and_expand branch per class; a single GCN reasons over all nodes.
    self.st = nn.ModuleList()
    for _ in range(n_class):
        self.st.append(squeeze_and_expand(in_channel, self.node, inter_channel))
    self.gcn = GCN(inter_channel, int(self.node * n_class))
    # Kaiming initialization for all conv and BN layers.
    for m in self.modules():
        if isinstance(m, (nn.Conv2d, nn.BatchNorm2d)):
            init_weights(m, init_type='kaiming')

def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False,
             dilated=True, deep_stem=False, **kwargs):
    super(DF_ResUnet, self).__init__(backbone, pretrained_base=pretrained_base,
                                     dilated=dilated, deep_stem=deep_stem, **kwargs)
    self.aux = aux
    self.dilated = dilated
    channels = self.base_channel
    if deep_stem or backbone == 'resnest101':
        conv1_channel = 128
    else:
        conv1_channel = 64
    if dilated:
        self.donv_up3 = decoder_block(channels[0] + channels[3], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
    else:
        self.donv_up1 = decoder_block(channels[2] + channels[3], channels[2])
        self.donv_up2 = decoder_block(channels[1] + channels[2], channels[1])
        self.donv_up3 = decoder_block(channels[0] + channels[1], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
    if self.aux:
        self.aux_layer = _FCNHead(256, n_class)
    self.sff = SelFuseFeature_1(channels[0], n_class=n_class)

def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False,
             dilated=True, deep_stem=False, crop_size=224, **kwargs):
    super(EMANet, self).__init__(backbone, pretrained_base=pretrained_base,
                                 dilated=dilated, deep_stem=deep_stem, **kwargs)
    self.aux = aux
    self.dilated = dilated
    channels = self.base_channel  # e.g. [256, 512, 1024, 2048] for resnet50/101
    if deep_stem or backbone == 'resnest101':
        conv1_channel = 128
    else:
        conv1_channel = 64
    # self.scale = [(128, 128), (256, 256)]  # locate
    # self.scale = [(64, 64), (128, 128)]  # seg 224
    # self.conv2 = deep_conv(3, conv1_channel // 2, conv1_channel)
    # self.conv3 = deep_conv(3, conv1_channel // 2, conv1_channel)
    # self.aspp = _ASPP(channels[3], [6, 10, 14], norm_layer=nn.BatchNorm2d,
    #                   norm_kwargs=None, out_channels=512, **kwargs)
    self.emau = EMAU(channels[3], k=64)
    # self.ppm = _PyramidPooling(channels[3], norm_layer=nn.BatchNorm2d, norm_kwargs=None)
    # self.conv_ppm = conv_bn_relu(channels[3] * 2, channels[2])
    if dilated:
        self.donv_up3 = decoder_block(channels[0] + channels[3], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
        self.out_conv = out_conv(channels[0] * 3, n_class)
    else:
        self.donv_up1 = decoder_block(channels[2] + channels[3], channels[2])
        self.donv_up2 = decoder_block(channels[1] + channels[2], channels[1])
        self.donv_up3 = decoder_block(channels[0] + channels[1], channels[0])
        self.donv_up4 = decoder_block(channels[0] + conv1_channel, channels[0])
        # Project intermediate decoder outputs before the final fusion conv.
        self.out_conv1 = conv_bn_relu(channels[2], channels[0] // 2)
        self.out_conv2 = conv_bn_relu(channels[1], channels[0] // 2)
        self.out_conv3 = conv_bn_relu(channels[0], channels[0] // 2)
        self.out_conv = out_conv(channels[0] * 2 + channels[0] // 2, n_class)
    # crop_size may be an int or an (h, w) tuple; Reco_module takes h and w.
    if isinstance(crop_size, tuple):
        self.reco = Reco_module(channels[0], crop_size[0] // 2, crop_size[1] // 2, 64)
    else:
        self.reco = Reco_module(channels[0], crop_size // 2, crop_size // 2, 64)
    if self.aux:
        self.aux_layer = _FCNHead(channels[3], n_class)
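
# --- Sketch: crop_size normalization (hypothetical helper) -------------------
# The int-vs-tuple branching above can be collapsed with a small utility; the
# helper below is hypothetical, not part of this repo.
def to_2tuple(size):
    """Normalize an int or (h, w) tuple to an (h, w) tuple."""
    return size if isinstance(size, tuple) else (size, size)

# Usage sketch:
#   h, w = to_2tuple(crop_size)
#   self.reco = Reco_module(channels[0], h // 2, w // 2, 64)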
def __init__(self, n_class, backbone='resnet34', aux=False, pretrained_base=False,
             dilated=False, **kwargs):
    super(DeepLabV3, self).__init__(backbone, pretrained_base=pretrained_base,
                                    dilated=dilated, **kwargs)
    self.head = _DeepLabHead(self.base_channel[-1], n_class, **kwargs)
    self.aux = aux
    if self.aux:
        self.auxlayer = _FCNHead(256, n_class, **kwargs)
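
# --- Sketch: _FCNHead (assumed) -----------------------------------------------
# _FCNHead is used throughout this file as the (auxiliary) classifier head.
# The sketch below follows the common FCN head pattern (3x3 conv-BN-ReLU,
# dropout, 1x1 classifier) found in gluoncv-style segmentation code; it is an
# assumption, not necessarily this repo's definition.
class FCNHeadSketch(nn.Module):
    def __init__(self, in_channels, n_class):
        super().__init__()
        inter_channels = in_channels // 4
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, inter_channels, kernel_size=3, padding=1,
                      bias=False),
            nn.BatchNorm2d(inter_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, n_class, kernel_size=1),
        )

    def forward(self, x):
        return self.block(x)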