def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_ACFHead, self).__init__()
    with self.name_scope():
        self.aspp = ASPPModule(512, in_channels, norm_layer, norm_kwargs,
                               rates=(12, 24, 36), pool_branch=False)
        self.coarse_head = FCNHead(nclass, 512, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.acf = _ACFModule(512, 512, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.head = FCNHead(nclass, 1024, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, backbone='mobilenet_v2_1_0', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=True,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(FCNMobileNet, self).__init__(nclass, aux, backbone, height, width, base_size,
                                       crop_size, pretrained_base,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = FCNHead(nclass=nclass, in_channels=self.stage_channels[3],
                            norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        if self.aux:
            self.aux_head = FCNHead(nclass=nclass, in_channels=self.stage_channels[2],
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, decoder_capacity, input_height, input_width,
             norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_LadderHead, self).__init__()
    with self.name_scope():
        self.conv_c4 = ConvBlock(decoder_capacity, 1, norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs, activation='relu')
        self.fusion_c3 = LateralFusion(decoder_capacity, input_height // 16, input_width // 16,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c2 = LateralFusion(decoder_capacity, input_height // 8, input_width // 8,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c1 = LateralFusion(decoder_capacity, input_height // 4, input_width // 4,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.seg_head = FCNHead(nclass, decoder_capacity, norm_layer, norm_kwargs)
def __init__(self, nclass, backbone='densenet169', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(LadderDenseNet, self).__init__(nclass, aux, backbone, height, width, base_size,
                                         crop_size, pretrained_base,
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    decoder_capacity = 128
    with self.name_scope():
        self.head = _LadderHead(nclass, decoder_capacity,
                                norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                input_height=self._up_kwargs['height'],
                                input_width=self._up_kwargs['width'])
        if self.aux:
            self.auxlayer = FCNHead(nclass, in_channels=decoder_capacity,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, height=60, width=60, norm_layer=nn.BatchNorm,
             norm_kwargs=None, use_sigmoid=True):
    super(_AttentionHead, self).__init__()
    self.sigmoid = use_sigmoid
    self.up_kwargs = {'height': height, 'width': width}
    with self.name_scope():
        self.seg_head = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.conv3x3 = ConvBlock(512, 3, 1, 1, norm_layer=norm_layer,
                                 norm_kwargs=norm_kwargs, activation='relu')
        if use_sigmoid:
            self.conv1x1 = nn.Conv2D(1, 1, in_channels=512)
        else:
            self.conv1x1 = nn.Conv2D(2, 1, in_channels=512)
def __init__(self, nclass, input_height, input_width, capacity=256,
             norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_NextHead, self).__init__()
    with self.name_scope():
        self.conv_c4 = ConvBlock(capacity, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_16x = LateralFusion(capacity, input_height // 16, input_width // 16,
                                        norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_8x = LateralFusion(capacity, input_height // 8, input_width // 8,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.seg_head = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, backbone='resnet50', aux=False, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=True,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(UNet, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                               pretrained_base, dilate=False,
                               norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _NextHead(nclass, self._up_kwargs['height'], self._up_kwargs['width'],
                              norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        if self.aux:
            self.auxlayer = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, backbone='resnet50', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(PSPNet, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                                 pretrained_base, dilate=True,
                                 norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _PyramidHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                 height=self._up_kwargs['height'] // 8,
                                 width=self._up_kwargs['width'] // 8)
        if self.aux:
            self.auxlayer = FCNHead(nclass=nclass, in_channels=1024,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
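# --- Usage sketch (illustrative, not part of the original source) ---
# A minimal example of constructing PSPNet from the signature above and running a dummy
# forward pass. The explicit height/width are passed because the pyramid head is built from
# self._up_kwargs; nclass=21 and the CPU context are arbitrary example values.
import mxnet as mx
from mxnet import nd

psp = PSPNet(nclass=21, backbone='resnet50', aux=True, height=480, width=480)
psp.initialize(ctx=mx.cpu())
dummy = nd.zeros((1, 3, 480, 480))  # NCHW input matching the declared height/width
outputs = psp(dummy)                # main prediction (plus an auxiliary output when aux=True)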
def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_DeepLabHead, self).__init__()
    with self.name_scope():
        self.aspp = ASPPModule(256, in_channels, norm_layer, norm_kwargs, rates=(12, 24, 36))
        self.conv_c1 = ConvModule2d(48, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        # 304 = 256 ASPP channels + 48 low-level channels after concatenation
        self.conv3x3 = ConvModule2d(256, 3, 1, 1, in_channels=304,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.drop = nn.Dropout(0.5)
        self.head = FCNHead(nclass, 256, norm_layer, norm_kwargs)
def __init__(self, nclass, backbone='resnet18', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(SwiftResNet, self).__init__(nclass, aux, backbone, height, width, base_size,
                                      crop_size, pretrained_base, dilate=False,
                                      norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _SwiftNetHead(nclass, self.base_channels[3],
                                  norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                  input_height=self._up_kwargs['height'],
                                  input_width=self._up_kwargs['width'])
        if self.aux:
            self.auxlayer = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, backbone='resnet50', aux=False, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=True,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(FaPN, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                               pretrained_base, dilate=False,
                               norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _FaPNHead(nclass, self.stage_channels,
                              norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        if self.aux:
            self.aux_head = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, backbone='resnet50', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(DenseASPP, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                                    pretrained_base, dilate=True,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _DenseASPPHead(nclass, 2048, norm_layer, norm_kwargs)
        if self.aux:
            self.auxlayer = FCNHead(nclass, 1024, norm_layer, norm_kwargs)
def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None,
             height=60, width=60):
    super(_DeepLabHead, self).__init__()
    self.up_kwargs = {'height': height, 'width': width}
    with self.name_scope():
        self.aspp = ASPP(256, in_channels, norm_layer, norm_kwargs, height // 2, width // 2,
                         atrous_rates=(12, 24, 36))
        self.conv_c1 = ConvBlock(48, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.conv3x3 = ConvBlock(256, 3, 1, 1, in_channels=304,
                                 norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.drop = nn.Dropout(0.5)
        self.head = FCNHead(nclass, 256, norm_layer, norm_kwargs)
def __init__(self, nclass, capacity=512, attention=False, drop=.5,
             norm_layer=nn.BatchNorm, norm_kwargs=None, height=120, width=120):
    super(_CAHead, self).__init__()
    self.up_kwargs = {'height': height, 'width': width}
    self.attention = attention
    self.gamma = 1.0
    height = height // 2
    width = width // 2
    with self.name_scope():
        # Chained Context Aggregation Module
        self.gp = GlobalFlow(capacity, 2048, height, width, norm_layer, norm_kwargs)
        self.cp1 = _ContextFlow(capacity, stride=2, norm_layer=norm_layer,
                                norm_kwargs=norm_kwargs, height=height, width=width)
        self.cp2 = _ContextFlow(capacity, stride=4, norm_layer=norm_layer,
                                norm_kwargs=norm_kwargs, height=height, width=width)
        self.cp3 = _ContextFlow(capacity, stride=8, norm_layer=norm_layer,
                                norm_kwargs=norm_kwargs, height=height, width=width)
        self.cp4 = _ContextFlow(capacity, stride=16, norm_layer=norm_layer,
                                norm_kwargs=norm_kwargs, height=height, width=width)
        if self.attention:
            self.selection = _FeatureSelection(256, in_channels=capacity,
                                               norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        else:
            self.proj = ConvBlock(256, 3, 1, 1, in_channels=capacity,
                                  norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.drop = nn.Dropout(drop) if drop else None
        # decoder
        self.decoder = ConvBlock(48, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.conv3x3 = ConvBlock(256, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        # segmentation head
        self.seg_head = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, in_channels, input_height, input_width, capacity=256,
             norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_SwiftNetHead, self).__init__()
    with self.name_scope():
        self.ppool = PyramidPooling(in_channels, input_height // 32, input_width // 32,
                                    norm_layer, norm_kwargs)
        self.conv_c4 = ConvBlock(capacity, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c3 = LateralFusion(capacity, input_height // 16, input_width // 16,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c2 = LateralFusion(capacity, input_height // 8, input_width // 8,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c1 = LateralFusion(capacity, input_height // 4, input_width // 4,
                                       norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.seg_head = FCNHead(nclass, capacity, norm_layer, norm_kwargs)
def __init__(self, nclass, backbone='xception65', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=True,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(DeepLabv3Plus, self).__init__(nclass, aux, backbone, height, width, base_size,
                                        crop_size, pretrained_base, dilate=True,
                                        norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _DeepLabHead(nclass, 2048, norm_layer, norm_kwargs,
                                 height=self._up_kwargs['height'] // 4,
                                 width=self._up_kwargs['width'] // 4)
        if self.aux:
            self.auxlayer = FCNHead(nclass, 728, norm_layer, norm_kwargs)
def __init__(self, nclass, backbone='resnet50', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False, dilate=True,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(DeepLabv3, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                                    pretrained_base, dilate,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    self.output_stride = 8 if dilate else 32
    with self.name_scope():
        self.head = _DeepLabHead(nclass, self.stage_channels[3], norm_layer, norm_kwargs)
        if self.aux:
            self.aux_head = FCNHead(nclass, self.stage_channels[2], norm_layer, norm_kwargs)
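# --- Normalization-layer sketch (illustrative, not part of the original source) ---
# Every constructor above forwards norm_layer / norm_kwargs to its sub-blocks, so a
# synchronized BatchNorm can be swapped in at construction time. SyncBatchNorm comes from
# mxnet.gluon.contrib.nn; num_devices=4 and nclass=19 are example values only.
from mxnet.gluon.contrib.nn import SyncBatchNorm

deeplab = DeepLabv3(nclass=19, backbone='resnet50', aux=True,
                    norm_layer=SyncBatchNorm, norm_kwargs={'num_devices': 4})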
def __init__(self, nclass, channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_DFPModule, self).__init__()
    with self.name_scope():
        self.blk_4 = ConvModule2d(channels, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.blk_3 = ConvModule2d(channels, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.blk_2 = ConvModule2d(channels, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.blk_1 = ConvModule2d(channels, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.head = FCNHead(nclass, channels * 5, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
def __init__(self, nclass, input_height, input_width, capacity=128,
             norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_SwiftNetHead, self).__init__()
    with self.name_scope():
        self.conv1x1 = ConvBlock(capacity, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_32x = _LateralFusion(capacity, input_height // 32, input_width // 32,
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_16x = _LateralFusion(capacity, input_height // 16, input_width // 16,
                                         norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_8x = _LateralFusion(capacity, input_height // 8, input_width // 8,
                                        norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.final = _LateralFusion(capacity, input_height // 4, input_width // 4, True,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.seg_head = FCNHead(nclass, capacity, norm_layer, norm_kwargs)
def __init__(self, nclass, backbone='resnet18', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(BiSeNet, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                                  pretrained_base, dilate=False,
                                  norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    self.head = _BiSeNetHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                             height=self._up_kwargs['height'] // 8,
                             width=self._up_kwargs['width'] // 8)
    if self.aux:
        self.auxlayer = FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs,
                                drop_out=.0)
def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_DenseASPPHead, self).__init__()
    with self.name_scope():
        self.dense_aspp = DenseASPPBlock(256, in_channels, norm_layer, norm_kwargs)
        self.head = FCNHead(nclass, 256, norm_layer, norm_kwargs)
def __init__(self, nclass, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_AttaNetHead, self).__init__()
    with self.name_scope():
        self.afm = _AttentionFusionModule(128, norm_layer, norm_kwargs)
        self.conv3x3 = ConvModule2d(128, 3, 1, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.sam = _StripAttentionModule(128, norm_layer, norm_kwargs)
        self.seg = FCNHead(nclass, 128, norm_layer, norm_kwargs)
def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_DeepLabHead, self).__init__()
    with self.name_scope():
        self.aspp = ASPPModule(256, in_channels, norm_layer, norm_kwargs, rates=(6, 12, 18))
        self.head = FCNHead(nclass, 256, norm_layer, norm_kwargs)
def __init__(self, nclass, backbone='xception39', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(BiSeNetX, self).__init__(nclass, aux, height, width, base_size, crop_size)
    assert backbone == 'xception39', 'only the xception39 backbone is supported.'
    pretrained = xception39(pretrained_base, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.conv = pretrained.conv1
        self.max_pool = pretrained.maxpool
        self.layer1 = pretrained.layer1
        self.layer2 = pretrained.layer2
        self.layer3 = pretrained.layer3
        self.head = _BiSeNetHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        if self.aux:
            self.aux_head = HybridConcurrentIsolate()
            self.aux_head.add(
                FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs),
                FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs))
def __init__(self, nclass, backbone='resnet18', aux=True, height=None, width=None,
             base_size=520, crop_size=480, pretrained_base=False,
             norm_layer=nn.BatchNorm, norm_kwargs=None, **kwargs):
    super(BiSeNetR, self).__init__(nclass, aux, backbone, height, width, base_size, crop_size,
                                   pretrained_base, dilate=False,
                                   norm_layer=norm_layer, norm_kwargs=norm_kwargs)
    with self.name_scope():
        self.head = _BiSeNetHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        if self.aux:
            self.aux_head = HybridConcurrentIsolate()
            self.aux_head.add(
                FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs),
                FCNHead(nclass, norm_layer=norm_layer, norm_kwargs=norm_kwargs))
def __init__(self, nclass, in_channels, capacity=256,
             norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_SwiftNetHead, self).__init__()
    with self.name_scope():
        self.ppool = PPModule(in_channels, norm_layer, norm_kwargs)
        self.conv_c4 = ConvModule2d(capacity, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c3 = LateralFusion(capacity, norm_layer, norm_kwargs)
        self.fusion_c2 = LateralFusion(capacity, norm_layer, norm_kwargs)
        self.fusion_c1 = LateralFusion(capacity, norm_layer, norm_kwargs)
        self.seg_head = FCNHead(nclass, capacity, norm_layer, norm_kwargs)
def __init__(self, nclass, in_channels, norm_layer=nn.BatchNorm, norm_kwargs=None,
             height=60, width=60):
    super(_DeepLabHead, self).__init__()
    with self.name_scope():
        self.aspp = ASPP(256, in_channels, norm_layer, norm_kwargs, height, width,
                         atrous_rates=(12, 24, 36))
        self.head = FCNHead(nclass, 256, norm_layer, norm_kwargs)
def __init__(self, nclass, height, width, norm_layer=nn.BatchNorm, norm_kwargs=None,
             activation='relu'):
    super(_PyramidHead, self).__init__()
    with self.name_scope():
        self.pool = PyramidPooling(2048, height, width, norm_layer, norm_kwargs,
                                   activation, reduction=4)
        # the segmentation head consumes the 4096-channel output of the pyramid pooling
        self.seg_head = FCNHead(nclass, 4096, norm_layer, norm_kwargs)
def __init__(self, nclass, capacity=128, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_SwiftNetHead, self).__init__()
    with self.name_scope():
        self.conv1x1 = ConvModule2d(capacity, 1, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_32x = _LateralFusion(capacity, norm_layer, norm_kwargs)
        self.fusion_16x = _LateralFusion(capacity, norm_layer, norm_kwargs)
        self.fusion_8x = _LateralFusion(capacity, norm_layer, norm_kwargs)
        self.final = _LateralFusion(capacity, norm_layer, norm_kwargs, is_final=True)
        self.seg_head = FCNHead(nclass, capacity, norm_layer, norm_kwargs)
def __init__(self, nclass, decoder_capacity, norm_layer=nn.BatchNorm, norm_kwargs=None):
    super(_LadderHead, self).__init__()
    with self.name_scope():
        self.conv_c4 = ConvModule2d(decoder_capacity, 1,
                                    norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        self.fusion_c3 = LateralFusion(decoder_capacity, norm_layer, norm_kwargs)
        self.fusion_c2 = LateralFusion(decoder_capacity, norm_layer, norm_kwargs)
        self.fusion_c1 = LateralFusion(decoder_capacity, norm_layer, norm_kwargs)
        self.seg_head = FCNHead(nclass, decoder_capacity, norm_layer, norm_kwargs)